summaryrefslogtreecommitdiff
path: root/src/pmdas/linux
diff options
context:
space:
mode:
Diffstat (limited to 'src/pmdas/linux')
-rw-r--r--  src/pmdas/linux/GNUmakefile                 128
-rw-r--r--  src/pmdas/linux/clusters.h                   82
-rw-r--r--  src/pmdas/linux/convert.h                    49
-rw-r--r--  src/pmdas/linux/devmapper.c                  86
-rw-r--r--  src/pmdas/linux/devmapper.h                  29
-rw-r--r--  src/pmdas/linux/filesys.c                   122
-rw-r--r--  src/pmdas/linux/filesys.h                    32
-rw-r--r--  src/pmdas/linux/getinfo.c                    93
-rw-r--r--  src/pmdas/linux/getinfo.h                    16
-rw-r--r--  src/pmdas/linux/help                       1122
-rw-r--r--  src/pmdas/linux/indom.h                      69
-rw-r--r--  src/pmdas/linux/interrupts.c                394
-rw-r--r--  src/pmdas/linux/interrupts.h                 19
-rw-r--r--  src/pmdas/linux/linux_table.c               116
-rw-r--r--  src/pmdas/linux/linux_table.h                66
-rw-r--r--  src/pmdas/linux/msg_limits.c                 49
-rw-r--r--  src/pmdas/linux/msg_limits.h                 35
-rw-r--r--  src/pmdas/linux/numa_meminfo.c              137
-rw-r--r--  src/pmdas/linux/numa_meminfo.h               32
-rw-r--r--  src/pmdas/linux/pmda.c                     5807
-rw-r--r--  src/pmdas/linux/proc_cpuinfo.c              246
-rw-r--r--  src/pmdas/linux/proc_cpuinfo.h               49
-rw-r--r--  src/pmdas/linux/proc_loadavg.c               45
-rw-r--r--  src/pmdas/linux/proc_loadavg.h               29
-rw-r--r--  src/pmdas/linux/proc_meminfo.c              188
-rw-r--r--  src/pmdas/linux/proc_meminfo.h               79
-rw-r--r--  src/pmdas/linux/proc_net_dev.c              444
-rw-r--r--  src/pmdas/linux/proc_net_dev.h              100
-rw-r--r--  src/pmdas/linux/proc_net_netstat.c          354
-rw-r--r--  src/pmdas/linux/proc_net_netstat.h          150
-rw-r--r--  src/pmdas/linux/proc_net_rpc.c              188
-rw-r--r--  src/pmdas/linux/proc_net_rpc.h               99
-rw-r--r--  src/pmdas/linux/proc_net_snmp.c             367
-rw-r--r--  src/pmdas/linux/proc_net_snmp.h             136
-rw-r--r--  src/pmdas/linux/proc_net_snmp_migrate.conf    8
-rw-r--r--  src/pmdas/linux/proc_net_sockstat.c          65
-rw-r--r--  src/pmdas/linux/proc_net_sockstat.h          29
-rw-r--r--  src/pmdas/linux/proc_net_tcp.c               71
-rw-r--r--  src/pmdas/linux/proc_net_tcp.h               44
-rw-r--r--  src/pmdas/linux/proc_partitions.c           808
-rw-r--r--  src/pmdas/linux/proc_partitions.h            44
-rw-r--r--  src/pmdas/linux/proc_scsi.c                 159
-rw-r--r--  src/pmdas/linux/proc_scsi.h                  38
-rw-r--r--  src/pmdas/linux/proc_slabinfo.c             237
-rw-r--r--  src/pmdas/linux/proc_slabinfo.h              53
-rw-r--r--  src/pmdas/linux/proc_stat.c                 304
-rw-r--r--  src/pmdas/linux/proc_stat.h                  65
-rw-r--r--  src/pmdas/linux/proc_sys_fs.c                80
-rw-r--r--  src/pmdas/linux/proc_sys_fs.h                32
-rw-r--r--  src/pmdas/linux/proc_uptime.c                47
-rw-r--r--  src/pmdas/linux/proc_uptime.h                29
-rw-r--r--  src/pmdas/linux/proc_vmstat.c               299
-rw-r--r--  src/pmdas/linux/proc_vmstat.h               131
-rw-r--r--  src/pmdas/linux/root_linux                 1005
-rw-r--r--  src/pmdas/linux/sem_limits.c                 51
-rw-r--r--  src/pmdas/linux/sem_limits.h                 49
-rw-r--r--  src/pmdas/linux/shm_limits.c                 43
-rw-r--r--  src/pmdas/linux/shm_limits.h                 32
-rw-r--r--  src/pmdas/linux/swapdev.c                    72
-rw-r--r--  src/pmdas/linux/swapdev.h                    28
-rw-r--r--  src/pmdas/linux/sysfs_kernel.c               41
-rw-r--r--  src/pmdas/linux/sysfs_kernel.h               34
62 files changed, 14855 insertions, 0 deletions
diff --git a/src/pmdas/linux/GNUmakefile b/src/pmdas/linux/GNUmakefile
new file mode 100644
index 0000000..4a0cc4f
--- /dev/null
+++ b/src/pmdas/linux/GNUmakefile
@@ -0,0 +1,128 @@
+#
+# Copyright (c) 2000,2003,2004,2008 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (c) 2007-2010 Aconex. All Rights Reserved.
+# Copyright (c) 2013 Red Hat.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+
+TOPDIR = ../../..
+include $(TOPDIR)/src/include/builddefs
+
+IAM = linux
+DOMAIN = LINUX
+CMDTARGET = pmdalinux
+LIBTARGET = pmda_linux.so
+PMDAINIT = linux_init
+PMDADIR = $(PCP_PMDAS_DIR)/$(IAM)
+LOGREWRITEDIR = $(PCP_VAR_DIR)/config/pmlogrewrite
+CONF_LINE = "linux 60 dso $(PMDAINIT) $(PMDADIR)/$(LIBTARGET)"
+
+CFILES = pmda.c \
+ proc_stat.c proc_meminfo.c proc_loadavg.c \
+ proc_net_dev.c interrupts.c filesys.c \
+ swapdev.c devmapper.c proc_net_rpc.c proc_partitions.c \
+ getinfo.c proc_net_sockstat.c proc_net_snmp.c \
+ proc_scsi.c proc_cpuinfo.c proc_net_tcp.c \
+ proc_slabinfo.c sem_limits.c msg_limits.c shm_limits.c \
+ proc_uptime.c proc_sys_fs.c proc_vmstat.c \
+ sysfs_kernel.c linux_table.c numa_meminfo.c \
+ proc_net_netstat.c
+
+HFILES = clusters.h indom.h convert.h \
+ proc_stat.h proc_meminfo.h proc_loadavg.h \
+ proc_net_dev.h interrupts.h filesys.h \
+ swapdev.h devmapper.h proc_net_rpc.h proc_partitions.h \
+ getinfo.h proc_net_sockstat.h proc_net_snmp.h \
+ proc_scsi.h proc_cpuinfo.h proc_net_tcp.h \
+ proc_slabinfo.h sem_limits.h msg_limits.h shm_limits.h \
+ proc_uptime.h proc_sys_fs.h proc_vmstat.h \
+ sysfs_kernel.h linux_table.h numa_meminfo.h \
+ proc_net_netstat.h
+
+VERSION_SCRIPT = exports
+HELPTARGETS = help.dir help.pag
+LSRCFILES = help root_linux proc_net_snmp_migrate.conf
+LDIRT = $(HELPTARGETS) domain.h $(VERSION_SCRIPT)
+
+LLDLIBS = $(PCP_PMDALIB)
+LCFLAGS = $(INVISIBILITY)
+
+# Uncomment these flags for profiling
+# LCFLAGS += -pg
+# LLDFLAGS += -pg
+
+default: build-me
+
+include $(BUILDRULES)
+
+ifeq "$(TARGET_OS)" "linux"
+build-me: domain.h $(LIBTARGET) $(CMDTARGET) $(HELPTARGETS)
+ @if [ `grep -c $(CONF_LINE) ../pmcd.conf` -eq 0 ]; then \
+ echo $(CONF_LINE) >> ../pmcd.conf ; \
+ fi
+
+install: default
+ $(INSTALL) -m 755 -d $(PMDADIR)
+ $(INSTALL) -m 644 domain.h help $(HELPTARGETS) $(PMDADIR)
+ $(INSTALL) -m 755 $(LIBTARGET) $(CMDTARGET) $(PMDADIR)
+ $(INSTALL) -m 644 root_linux $(PCP_VAR_DIR)/pmns/root_linux
+ $(INSTALL) -m 644 proc_net_snmp_migrate.conf $(LOGREWRITEDIR)/linux_proc_net_snmp_migrate.conf
+else
+build-me:
+install:
+endif
+
+default_pcp : default
+
+install_pcp : install
+
+$(HELPTARGETS) : help
+ $(RUN_IN_BUILD_ENV) $(TOPDIR)/src/newhelp/newhelp -n root_linux -v 2 -o help < help
+
+$(VERSION_SCRIPT):
+ $(VERSION_SCRIPT_MAKERULE)
+
+domain.h: ../../pmns/stdpmid
+ $(DOMAIN_MAKERULE)
+
+interrupts.o pmda.o proc_partitions.o: clusters.h
+pmda.o proc_partitions.o: convert.h
+pmda.o: domain.h
+filesys.o interrupts.o pmda.o: filesys.h
+pmda.o: getinfo.h
+pmda.o devmapper.o: devmapper.h
+numa_meminfo.o pmda.o proc_cpuinfo.o proc_partitions.o proc_stat.o: indom.h
+interrupts.o pmda.o: interrupts.h
+linux_table.o numa_meminfo.o pmda.o: linux_table.h
+msg_limits.o pmda.o: msg_limits.h
+numa_meminfo.o pmda.o: numa_meminfo.h
+pmda.o proc_cpuinfo.o proc_stat.o: proc_cpuinfo.h
+pmda.o proc_loadavg.o: proc_loadavg.h
+pmda.o proc_meminfo.o: proc_meminfo.h
+pmda.o proc_net_dev.o: proc_net_dev.h
+pmda.o proc_net_netstat.o: proc_net_netstat.h
+pmda.o proc_net_rpc.o: proc_net_rpc.h
+pmda.o proc_net_snmp.o: proc_net_snmp.h
+pmda.o proc_net_sockstat.o: proc_net_sockstat.h
+pmda.o proc_net_tcp.o: proc_net_tcp.h
+pmda.o proc_partitions.o: proc_partitions.h
+pmda.o proc_scsi.o: proc_scsi.h
+pmda.o proc_slabinfo.o: proc_slabinfo.h
+pmda.o proc_stat.o: proc_stat.h
+pmda.o proc_sys_fs.o: proc_sys_fs.h
+pmda.o proc_uptime.o: proc_uptime.h
+pmda.o proc_vmstat.o: proc_vmstat.h
+pmda.o sem_limits.o: sem_limits.h
+pmda.o shm_limits.o: shm_limits.h
+pmda.o swapdev.o: swapdev.h
+pmda.o sysfs_kernel.o: sysfs_kernel.h
+pmda.o: $(VERSION_SCRIPT)
diff --git a/src/pmdas/linux/clusters.h b/src/pmdas/linux/clusters.h
new file mode 100644
index 0000000..0a1fdfe
--- /dev/null
+++ b/src/pmdas/linux/clusters.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2005,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _CLUSTERS_H
+#define _CLUSTERS_H
+
+/*
+ * fetch cluster numbers
+ */
+enum {
+ CLUSTER_STAT = 0, /* 0 /proc/stat */
+ CLUSTER_MEMINFO, /* 1 /proc/meminfo */
+ CLUSTER_LOADAVG, /* 2 /proc/loadavg */
+ CLUSTER_NET_DEV, /* 3 /proc/net/dev */
+ CLUSTER_INTERRUPTS, /* 4 /proc/interrupts */
+ CLUSTER_FILESYS, /* 5 /proc/mounts + statfs */
+ CLUSTER_SWAPDEV, /* 6 /proc/swaps */
+ CLUSTER_NET_NFS, /* 7 /proc/net/rpc/nfs + /proc/net/rpc/nfsd */
+ PROC_PID_STAT, /* 8 /proc/<pid>/stat -> proc PMDA */
+ PROC_PID_STATM, /* 9 /proc/<pid>/statm + /proc/<pid>/maps -> proc PMDA */
+ CLUSTER_PARTITIONS, /* 10 /proc/partitions */
+ CLUSTER_NET_SOCKSTAT, /* 11 /proc/net/sockstat */
+ CLUSTER_KERNEL_UNAME, /* 12 uname() system call */
+ PROC_PROC_RUNQ, /* 13 number of processes in various states -> proc PMDA */
+ CLUSTER_NET_SNMP, /* 14 /proc/net/snmp */
+ CLUSTER_SCSI, /* 15 /proc/scsi/scsi */
+ CLUSTER_XFS, /* 16 /proc/fs/xfs/stat -> xfs PMDA */
+ CLUSTER_XFSBUF, /* 17 /proc/fs/pagebuf/stat -> xfs PMDA */
+ CLUSTER_CPUINFO, /* 18 /proc/cpuinfo */
+ CLUSTER_NET_TCP, /* 19 /proc/net/tcp */
+ CLUSTER_SLAB, /* 20 /proc/slabinfo */
+ CLUSTER_SEM_LIMITS, /* 21 semctl(IPC_INFO) system call */
+ CLUSTER_MSG_LIMITS, /* 22 msgctl(IPC_INFO) system call */
+ CLUSTER_SHM_LIMITS, /* 23 shmctl(IPC_INFO) system call */
+ PROC_PID_STATUS, /* 24 /proc/<pid>/status -> proc PMDA */
+ CLUSTER_NUSERS, /* 25 number of users */
+ CLUSTER_UPTIME, /* 26 /proc/uptime */
+ CLUSTER_VFS, /* 27 /proc/sys/fs */
+ CLUSTER_VMSTAT, /* 28 /proc/vmstat */
+ CLUSTER_IB, /* deprecated: do not re-use 29 infiniband */
+ CLUSTER_QUOTA, /* 30 quotactl() -> xfs PMDA */
+ PROC_PID_SCHEDSTAT, /* 31 /proc/<pid>/schedstat -> proc PMDA */
+ PROC_PID_IO, /* 32 /proc/<pid>/io -> proc PMDA */
+ CLUSTER_NET_ADDR, /* 33 /proc/net/dev and ioctl(SIOCGIFCONF) */
+ CLUSTER_TMPFS, /* 34 /proc/mounts + statfs (tmpfs only) */
+ CLUSTER_SYSFS_KERNEL, /* 35 /sys/kernel metrics */
+ CLUSTER_NUMA_MEMINFO, /* 36 /sys/devices/system/node* NUMA memory */
+ PROC_CGROUP_SUBSYS, /* 37 /proc/cgroups control group subsystems -> proc PMDA */
+ PROC_CGROUP_MOUNTS, /* 38 /proc/mounts active control groups -> proc PMDA */
+ PROC_CPUSET_GROUPS, /* 39 cpuset control groups -> proc PMDA */
+ PROC_CPUSET_PROCS, /* 40 cpuset control group processes -> proc PMDA */
+ PROC_CPUACCT_GROUPS, /* 41 cpu accounting control groups -> proc PMDA */
+ PROC_CPUACCT_PROCS, /* 42 cpu accounting group processes -> proc PMDA */
+ PROC_CPUSCHED_GROUPS, /* 43 scheduler control groups -> proc PMDA */
+ PROC_CPUSCHED_PROCS, /* 44 scheduler group processes -> proc PMDA */
+ PROC_MEMORY_GROUPS, /* 45 memory control groups -> proc PMDA */
+ PROC_MEMORY_PROCS, /* 46 memory group processes -> proc PMDA */
+ PROC_NET_CLS_GROUPS, /* 47 network classification control groups -> proc PMDA */
+ PROC_NET_CLS_PROCS, /* 48 network classification group processes -> proc PMDA */
+ CLUSTER_INTERRUPT_LINES,/* 49 /proc/interrupts percpu interrupts */
+ CLUSTER_INTERRUPT_OTHER,/* 50 /proc/interrupts percpu interrupts */
+ PROC_PID_FD, /* 51 /proc/<pid>/fd -> proc PMDA */
+ CLUSTER_LV, /* 52 /dev/mapper */
+ CLUSTER_NET_NETSTAT, /* 53 /proc/net/netstat */
+ CLUSTER_DM, /* 54 disk.dm.* */
+
+ NUM_CLUSTERS /* one more than highest numbered cluster */
+};
+
+#endif /* _CLUSTERS_H */
diff --git a/src/pmdas/linux/convert.h b/src/pmdas/linux/convert.h
new file mode 100644
index 0000000..8a31eda
--- /dev/null
+++ b/src/pmdas/linux/convert.h
@@ -0,0 +1,49 @@
+/*
+ * Size conversion for different sized types extracted from the
+ * kernel on different platforms, particularly where the sizeof
+ * "long" differs.
+ *
+ * Copyright (c) 2007-2008 Aconex. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Some metrics are exported by the kernel as "unsigned long".
+ * On most 64bit platforms this is not the same size as an
+ * "unsigned int".
+ */
+#if defined(HAVE_64BIT_LONG)
+#define KERNEL_ULONG PM_TYPE_U64
+#define _pm_assign_ulong(atomp, val) do { (atomp)->ull = (val); } while (0)
+#else
+#define KERNEL_ULONG PM_TYPE_U32
+#define _pm_assign_ulong(atomp, val) do { (atomp)->ul = (val); } while (0)
+#endif
+
+/*
+ * Some metrics need to have their type set at runtime, based on the
+ * running kernel version (not simply a 64 vs 32 bit machine issue).
+ */
+#define KERNEL_UTYPE PM_TYPE_NOSUPPORT /* set to real type at runtime */
+#define _pm_metric_type(type, size) \
+ do { \
+ (type) = ((size)==8 ? PM_TYPE_U64 : PM_TYPE_U32); \
+ } while (0)
+#define _pm_assign_utype(size, atomp, val) \
+ do { \
+ if ((size)==8) { (atomp)->ull = (val); } else { (atomp)->ul = (val); } \
+ } while (0)
+
diff --git a/src/pmdas/linux/devmapper.c b/src/pmdas/linux/devmapper.c
new file mode 100644
index 0000000..38f87d3
--- /dev/null
+++ b/src/pmdas/linux/devmapper.c
@@ -0,0 +1,86 @@
+/*
+ * Linux LVM Devices Cluster
+ *
+ * Copyright (c) 2013-2014 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <sys/stat.h>
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "devmapper.h"
+
+int
+refresh_dev_mapper(dev_mapper_t *lvs)
+{
+ int i;
+ DIR *dirp;
+ struct dirent *dentry;
+ struct stat statbuf;
+ char path[MAXPATHLEN];
+
+ snprintf(path, sizeof(path), "%s/dev/mapper", linux_statspath);
+ if ((dirp = opendir(path)) == NULL)
+ return 1;
+
+ for (i = 0; i < lvs->nlv; i++) {
+ free(lvs->lv[i].dev_name);
+ free(lvs->lv[i].lv_name);
+ }
+ lvs->nlv = 0;
+ lvs->lv = NULL;
+ while ((dentry = readdir(dirp)) != NULL) {
+ char linkname[MAXPATHLEN];
+ int linkname_len;
+
+ snprintf(path, sizeof(path),
+ "%s/dev/mapper/%s", linux_statspath, dentry->d_name);
+
+ if (stat(path, &statbuf) == -1)
+ continue;
+ if (!S_ISBLK(statbuf.st_mode))
+ continue;
+
+ if ((linkname_len = readlink(path, linkname, sizeof(linkname)-1)) < 0)
+ continue;
+ linkname[linkname_len] = '\0';
+
+ i = lvs->nlv;
+ lvs->nlv++;
+
+ lvs->lv = (lv_entry_t *)realloc(lvs->lv, lvs->nlv * sizeof(lv_entry_t));
+ lvs->lv[i].id = lvs->nlv;
+
+ lvs->lv[i].dev_name = malloc(strlen(dentry->d_name)+1);
+ strcpy(lvs->lv[i].dev_name, dentry->d_name);
+
+ lvs->lv[i].lv_name = malloc(linkname_len+1);
+ strcpy(lvs->lv[i].lv_name, linkname);
+ }
+ closedir(dirp);
+
+ if (lvs->lv_indom->it_numinst != lvs->nlv) {
+ lvs->lv_indom->it_numinst = lvs->nlv;
+ lvs->lv_indom->it_set = (pmdaInstid *)
+ realloc(lvs->lv_indom->it_set, lvs->nlv * sizeof(pmdaInstid));
+ }
+ for (i = 0; i < lvs->nlv; i++) {
+ int skip_prefix = 0;
+ lvs->lv_indom->it_set[i].i_inst = lvs->lv[i].id;
+ if (strncmp(lvs->lv[i].lv_name, "../", 3) == 0)
+ skip_prefix = 3;
+ lvs->lv_indom->it_set[i].i_name = lvs->lv[i].lv_name + skip_prefix;
+ }
+ return 0;
+}
diff --git a/src/pmdas/linux/devmapper.h b/src/pmdas/linux/devmapper.h
new file mode 100644
index 0000000..7b75281
--- /dev/null
+++ b/src/pmdas/linux/devmapper.h
@@ -0,0 +1,29 @@
+/*
+ * Linux /dev/mapper metrics cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+typedef struct {
+ int id; /* internal instance id */
+ char *dev_name;
+ char *lv_name;
+} lv_entry_t;
+
+typedef struct {
+ int nlv;
+ lv_entry_t *lv;
+ pmdaIndom *lv_indom;
+} dev_mapper_t;
+
+extern int refresh_dev_mapper(dev_mapper_t *);
diff --git a/src/pmdas/linux/filesys.c b/src/pmdas/linux/filesys.c
new file mode 100644
index 0000000..3c06c13
--- /dev/null
+++ b/src/pmdas/linux/filesys.c
@@ -0,0 +1,122 @@
+/*
+ * Linux Filesystem Cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "filesys.h"
+
+char *
+scan_filesys_options(const char *options, const char *option)
+{
+ static char buffer[128];
+ char *s;
+
+ strncpy(buffer, options, sizeof(buffer));
+ buffer[sizeof(buffer)-1] = '\0';
+
+ s = strtok(buffer, ",");
+ while (s) {
+ if (strcmp(s, option) == 0)
+ return s;
+ s = strtok(NULL, ",");
+ }
+ return NULL;
+}
+
+int
+refresh_filesys(pmInDom filesys_indom, pmInDom tmpfs_indom)
+{
+ char buf[MAXPATHLEN];
+ char realdevice[MAXPATHLEN];
+ filesys_t *fs;
+ pmInDom indom;
+ FILE *fp;
+ char *path, *device, *type, *options;
+ int sts;
+
+ pmdaCacheOp(tmpfs_indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(filesys_indom, PMDA_CACHE_INACTIVE);
+
+ if ((fp = linux_statsfile("/proc/mounts", buf, sizeof(buf))) == NULL)
+ return -oserror();
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if ((device = strtok(buf, " ")) == 0)
+ continue;
+
+ path = strtok(NULL, " ");
+ type = strtok(NULL, " ");
+ options = strtok(NULL, " ");
+ if (strcmp(type, "proc") == 0 ||
+ strcmp(type, "nfs") == 0 ||
+ strcmp(type, "devfs") == 0 ||
+ strcmp(type, "devpts") == 0 ||
+ strcmp(type, "cgroup") == 0 ||
+ strncmp(type, "auto", 4) == 0)
+ continue;
+
+ indom = filesys_indom;
+ if (strcmp(type, "tmpfs") == 0) {
+ indom = tmpfs_indom;
+ device = path;
+ }
+ else if (strncmp(device, "/dev", 4) != 0)
+ continue;
+ if (realpath(device, realdevice) != NULL)
+ device = realdevice;
+
+ sts = pmdaCacheLookupName(indom, device, NULL, (void **)&fs);
+ if (sts == PMDA_CACHE_ACTIVE) /* repeated line in /proc/mounts? */
+ continue;
+ if (sts == PMDA_CACHE_INACTIVE) { /* re-activate an old mount */
+ pmdaCacheStore(indom, PMDA_CACHE_ADD, device, fs);
+ if (strcmp(path, fs->path) != 0) { /* old device, new path */
+ free(fs->path);
+ fs->path = strdup(path);
+ }
+ if (strcmp(options, fs->options) != 0) { /* old device, new opts */
+ free(fs->options);
+ fs->options = strdup(options);
+ }
+ }
+ else { /* new mount */
+ if ((fs = malloc(sizeof(filesys_t))) == NULL)
+ continue;
+ fs->device = strdup(device);
+ fs->path = strdup(path);
+ fs->options = strdup(options);
+#if PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA) {
+ fprintf(stderr, "refresh_filesys: add \"%s\" \"%s\"\n",
+ fs->path, device);
+ }
+#endif
+ pmdaCacheStore(indom, PMDA_CACHE_ADD, device, fs);
+ }
+ fs->flags = 0;
+ }
+
+ /*
+ * success
+ * Note: we do not call statfs() here since only some instances
+ * may be requested (rather, we do it in linux_fetch, see pmda.c).
+ */
+ fclose(fp);
+ return 0;
+}
diff --git a/src/pmdas/linux/filesys.h b/src/pmdas/linux/filesys.h
new file mode 100644
index 0000000..d90bdf7
--- /dev/null
+++ b/src/pmdas/linux/filesys.h
@@ -0,0 +1,32 @@
+/*
+ * Linux Filesystem Cluster
+ *
+ * Copyright (c) 2000,2004,2007 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <sys/vfs.h>
+
+/* Values for flags in filesys_t */
+#define FSF_FETCHED (1U << 0)
+
+typedef struct filesys {
+ int id;
+ unsigned int flags;
+ char *device;
+ char *path;
+ char *options;
+ struct statfs stats;
+} filesys_t;
+
+extern int refresh_filesys(pmInDom, pmInDom);
+extern char *scan_filesys_options(const char *, const char *);
diff --git a/src/pmdas/linux/getinfo.c b/src/pmdas/linux/getinfo.c
new file mode 100644
index 0000000..d5f7f29
--- /dev/null
+++ b/src/pmdas/linux/getinfo.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2010 Aconex. All Rights Reserved.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <sys/stat.h>
+#include <sys/dir.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+
+char *
+get_distro_info(void)
+{
+ /*
+ * Heuristic guesswork ... add code here as we learn
+ * more about how to identify each Linux distribution.
+ */
+ static char *distro_name;
+ struct stat sbuf;
+ int r, sts, fd = -1, len = 0;
+ char path[MAXPATHLEN];
+ char prefix[16];
+ enum { /* rfiles array offsets */
+ DEB_VERSION = 0,
+ LSB_RELEASE = 6,
+ };
+ char *rfiles[] = { "debian_version", "oracle-release", "fedora-release",
+ "redhat-release", "slackware-version", "SuSE-release", "lsb-release",
+ /* insert any new distribution release variants here */
+ NULL
+ };
+
+ if (distro_name)
+ return distro_name;
+
+ for (r = 0; rfiles[r] != NULL; r++) {
+ snprintf(path, sizeof(path), "%s/etc/%s", linux_statspath, rfiles[r]);
+ if (stat(path, &sbuf) == 0 && S_ISREG(sbuf.st_mode)) {
+ fd = open(path, O_RDONLY);
+ break;
+ }
+ }
+ if (fd != -1) {
+ if (r == DEB_VERSION) { /* Debian, needs a prefix */
+ strncpy(prefix, "Debian ", sizeof(prefix));
+ len = 7;
+ }
+ /*
+ * at this point, assume sbuf is good and file contains
+ * the string we want, probably with a \n terminator
+ */
+ distro_name = (char *)malloc(len + (int)sbuf.st_size + 1);
+ if (distro_name != NULL) {
+ if (len)
+ strncpy(distro_name, prefix, len);
+ sts = read(fd, distro_name + len, (int)sbuf.st_size);
+ if (sts <= 0) {
+ free(distro_name);
+ distro_name = NULL;
+ } else {
+ char *nl;
+
+ if (r == LSB_RELEASE) { /* may be Ubuntu */
+ if (!strncmp(distro_name, "DISTRIB_ID = ", 13))
+ distro_name += 13; /* ick */
+ if (!strncmp(distro_name, "DISTRIB_ID=", 11))
+ distro_name += 11; /* more ick */
+ }
+ distro_name[sts + len] = '\0';
+ if ((nl = strchr(distro_name, '\n')) != NULL)
+ *nl = '\0';
+ }
+ }
+ close(fd);
+ }
+ if (distro_name == NULL)
+ distro_name = "?";
+ return distro_name;
+}
diff --git a/src/pmdas/linux/getinfo.h b/src/pmdas/linux/getinfo.h
new file mode 100644
index 0000000..0bf170d
--- /dev/null
+++ b/src/pmdas/linux/getinfo.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2010 Aconex. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+extern char *get_distro_info(void);
+
diff --git a/src/pmdas/linux/help b/src/pmdas/linux/help
new file mode 100644
index 0000000..63166cf
--- /dev/null
+++ b/src/pmdas/linux/help
@@ -0,0 +1,1122 @@
+#
+# Copyright (c) 2000,2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+# Portions Copyright (c) International Business Machines Corp., 2002
+# Portions Copyright (c) 2007-2009 Aconex. All Rights Reserved.
+# Portions Copyright (c) 2013 Red Hat.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Linux PMDA help file in the ASCII format
+#
+# lines beginning with a # are ignored
+# lines beginning @ introduce a new entry of the form
+# @ metric_name oneline-text
+# help text goes
+# here over multiple lines
+# ...
+#
+# the metric_name is decoded against the default PMNS -- as a special case,
+# a name of the form NNN.MM (for numeric NNN and MM) is interpreted as an
+# instance domain identification, and the text describes the instance domain
+#
+# blank lines before the @ line are ignored
+#
+@ kernel.uname.release release level of the running kernel
+Release level of the running kernel as reported via the release[]
+value returned from uname(2) or uname -r.
+
+See also pmda.uname.
+
+@ kernel.uname.version version level (build number) and build date of the running kernel
+Version level of the running kernel as reported by the version[]
+value returned from uname(2) or uname -v. Usually a build number
+followed by a build date.
+
+See also pmda.uname.
+
+@ kernel.uname.sysname name of the implementation of the operating system
+Name of the implementation of the running operating system as reported
+by the sysname[] value returned from uname(2) or uname -s. Usually
+"Linux".
+
+See also pmda.uname.
+
+@ kernel.uname.machine name of the hardware type the system is running on
+Name of the hardware type the system is running on as reported by the machine[]
+value returned from uname(2) or uname -m, e.g. "i686".
+
+See also pmda.uname.
+
+@ kernel.uname.nodename host name of this node on the network
+Name of this node on the network as reported by the nodename[]
+value returned from uname(2) or uname -n. Usually a synonym for
+the host name.
+
+See also pmda.uname.
+
+@ kernel.uname.distro Linux distribution name
+The Linux distribution name, as determined by a number of heuristics.
+For example:
++ on Fedora, the contents of /etc/fedora-release
++ on RedHat, the contents of /etc/redhat-release
+
+@ kernel.percpu.cpu.user percpu user CPU time metric from /proc/stat, including guest CPU time
+@ kernel.percpu.cpu.vuser percpu user CPU time metric from /proc/stat, excluding guest CPU time
+@ kernel.percpu.cpu.nice percpu nice user CPU time metric from /proc/stat
+@ kernel.percpu.cpu.sys percpu sys CPU time metric from /proc/stat
+@ kernel.percpu.cpu.idle percpu idle CPU time metric from /proc/stat
+@ kernel.percpu.cpu.wait.total percpu wait CPU time
+Per-CPU I/O wait CPU time - time spent with outstanding I/O requests.
+
+@ kernel.percpu.cpu.intr percpu interrupt CPU time
+Total time spent processing interrupts on each CPU (this includes
+both soft and hard interrupt processing time).
+
+@ kernel.percpu.cpu.irq.soft percpu soft interrupt CPU time
+Per-CPU soft interrupt CPU time (deferred interrupt handling code,
+not run in the initial interrupt handler).
+
+@ kernel.percpu.cpu.irq.hard percpu hard interrupt CPU time
+Per-CPU hard interrupt CPU time ("hard" interrupt handling code
+is the code run directly on receipt of the initial hardware
+interrupt, and does not include "soft" interrupt handling code
+which is deferred until later).
+
+@ kernel.percpu.cpu.steal percpu CPU steal time
+Per-CPU time when the CPU had a runnable process, but the hypervisor
+(virtualisation layer) chose to run something else instead.
+
+@ kernel.percpu.cpu.guest percpu guest CPU time
+Per-CPU time spent running (virtual) guest operating systems.
+
+@ kernel.all.interrupts.errors interrupt error count from /proc/interrupts
+This is a global counter (normally converted to a count/second)
+for any and all errors that occur while handling interrupts.
+
+@ disk.dev.read per-disk read operations
+Cumulative number of disk read operations since system boot time (subject
+to counter wrap).
+
+@ disk.dev.write per-disk write operations
+Cumulative number of disk write operations since system boot time (subject
+to counter wrap).
+
+@ disk.dev.total per-disk total (read+write) operations
+Cumulative number of disk read and write operations since system boot
+time (subject to counter wrap).
+
+@ disk.dev.blkread per-disk block read operations
+Cumulative number of disk block read operations since system boot time
+(subject to counter wrap).
+
+@ disk.dev.blkwrite per-disk block write operations
+Cumulative number of disk block write operations since system boot time
+(subject to counter wrap).
+
+@ disk.dev.blktotal per-disk total (read+write) block operations
+Cumulative number of disk block read and write operations since system
+boot time (subject to counter wrap).
+
+@ disk.dev.read_bytes per-disk count of bytes read
+@ disk.dev.write_bytes per-disk count of bytes written
+@ disk.dev.total_bytes per-disk count of total bytes read and written
+
+@ disk.dev.scheduler per-disk I/O scheduler
+The name of the I/O scheduler in use for each device. The scheduler
+is part of the block layer in the kernel, and attempts to optimise the
+I/O submission patterns using various techniques (typically, sorting
+and merging adjacent requests into larger ones to reduce seek activity,
+but certainly not limited to that).
+
+@ disk.dev.avactive per-disk count of active time
+When converted to a rate, this metric represents the average utilization of
+the disk during the sampling interval. A value of 0.5 (or 50%) means the
+disk was active (i.e. busy) half the time.
+
+@ disk.dev.read_rawactive per-disk raw count of active read time
+When converted to a rate, this metric represents the raw utilization of
+the disk during the sampling interval as a result of reads. Accounting for
+this metric is only done on I/O completion and can thus result in more than a
+second's worth of IO being accounted for within any one second, leading to
+>100% utilisation. It is suitable mainly for use in calculations with other
+metrics, e.g. mirroring the results from existing performance tools:
+
+ iostat.dev.r_await = delta(disk.dev.read_rawactive) / delta(disk.dev.read)
+
+@ disk.dev.write_rawactive per-disk raw count of active write time
+When converted to a rate, this metric represents the raw utilization of
+the disk during the sampling interval as a result of writes. Accounting for
+this metric is only done on I/O completion and can thus result in more than a
+second's worth of IO being accounted for within any one second, leading to
+>100% utilisation. It is suitable mainly for use in calculations with other
+metrics, e.g. mirroring the results from existing performance tools:
+
+ iostat.dev.w_await = delta(disk.dev.write_rawactive) / delta(disk.dev.write)
+
+@ disk.dev.aveq per-disk time averaged count of request queue length
+When converted to a rate, this metric represents the time averaged disk
+request queue length during the sampling interval. A value of 2.5 (or 250%)
+represents a time averaged queue length of 2.5 requests during the sampling
+interval.
+
+@ disk.dev.read_merge per-disk count of merged read requests
+Count of read requests that were merged with an already queued read request.
+
+@ disk.dev.write_merge per-disk count of merged write requests
+Count of write requests that were merged with an already queued write request.
+
+@ disk.dm.read per-device-mapper device read operations
+
+@ disk.dm.write per-device-mapper device write operations
+
+@ disk.dm.total per-device-mapper device total (read+write) operations
+
+@ disk.dm.blkread per-device-mapper device block read operations
+
+@ disk.dm.blkwrite per-device-mapper device block write operations
+
+@ disk.dm.blktotal per-device-mapper device total (read+write) block operations
+
+@ disk.dm.read_bytes per-device-mapper device count of bytes read
+
+@ disk.dm.write_bytes per-device-mapper device count of bytes written
+
+@ disk.dm.total_bytes per-device-mapper device count of total bytes read and written
+
+@ disk.dm.read_merge per-device-mapper device count of merged read requests
+
+@ disk.dm.write_merge per-device-mapper device count of merged write requests
+
+@ disk.dm.avactive per-device-mapper device count of active time
+
+@ disk.dm.aveq per-device-mapper device time averaged count of request queue length
+
+@ disk.dm.read_rawactive per-device-mapper raw count of active read time
+When converted to a rate, this metric represents the raw utilization of
+the device during the sampling interval as a result of reads. Accounting for
+this metric is only done on I/O completion and can thus result in more than a
+second's worth of IO being accounted for within any one second, leading to
+>100% utilisation. It is suitable mainly for use in calculations with other
+metrics, e.g. mirroring the results from existing performance tools:
+
+ iostat.dm.r_await = delta(disk.dm.read_rawactive) / delta(disk.dm.read)
+
+@ disk.dm.write_rawactive per-device-mapper raw count of active write time
+When converted to a rate, this metric represents the raw utilization of
+the device during the sampling interval as a result of writes. Accounting for
+this metric is only done on I/O completion and can thus result in more than a
+second's worth of IO being accounted for within any one second, leading to
+>100% utilisation. It is suitable mainly for use in calculations with other
+metrics, e.g. mirroring the results from existing performance tools:
+
+ iostat.dm.w_await = delta(disk.dm.write_rawactive) / delta(disk.dm.write)
+
+@ hinv.map.dmname per-device-mapper device persistent name mapping to dm-[0-9]*
+
+@ disk.all.read_merge total count of merged read requests, summed for all disks
+Total count of read requests that were merged with an already queued read request.
+
+@ disk.all.write_merge total count of merged write requests, summed for all disks
+Total count of write requests that were merged with an already queued write request.
+
+@ disk.all.avactive total count of active time, summed for all disks
+When converted to a rate, this metric represents the average utilization of
+all disks during the sampling interval. A value of 0.25 (or 25%) means that
+on average every disk was active (i.e. busy) one quarter of the time.
+
+@ disk.all.read_rawactive raw count of active read time, summed for all disks
+When converted to a rate, this metric represents the raw utilization of all
+disks during the sampling interval due to read requests. The accounting for
+this metric is only done on I/O completion and can thus result in more than a
+second's worth of IO being accounted for within any one second, leading to
+>100% utilisation. It is suitable mainly for use in calculations with other
+metrics, e.g. mirroring the results from existing performance tools:
+
+ iostat.all.r_await = delta(disk.all.read_rawactive) / delta(disk.all.read)
+
+@ disk.all.write_rawactive raw count of active write time, summed for all disks
+When converted to a rate, this metric represents the raw utilization of all
+disks during the sampling interval due to write requests. The accounting for
+this metric is only done on I/O completion and can thus result in more than a
+second's worth of IO being accounted for within any one second, leading to
+>100% utilisation. It is suitable mainly for use in calculations with other
+metrics, e.g. mirroring the result from existing performance tools:
+
+ iostat.all.w_await = delta(disk.all.write_rawactive) / delta(disk.all.write)
+
+@ disk.all.aveq total time averaged count of request queue length, summed for all disks
+When converted to a rate, this metric represents the average across all disks
+of the time averaged request queue length during the sampling interval. A
+value of 1.5 (or 150%) suggests that (on average) each disk experienced a
+time averaged queue length of 1.5 requests during the sampling interval.
+
+@ disk.all.read total read operations, summed for all disks
+Cumulative number of disk read operations since system boot time
+(subject to counter wrap), summed over all disk devices.
+
+@ disk.all.write total write operations, summed for all disks
+Cumulative number of disk write operations since system boot time
+(subject to counter wrap), summed over all disk devices.
+
+@ disk.all.total total (read+write) operations, summed for all disks
+Cumulative number of disk read and write operations since system boot
+time (subject to counter wrap), summed over all disk devices.
+
+@ disk.all.blkread block read operations, summed for all disks
+Cumulative number of disk block read operations since system boot time
+(subject to counter wrap), summed over all disk devices.
+
+@ disk.all.blkwrite block write operations, summed for all disks
+Cumulative number of disk block write operations since system boot time
+(subject to counter wrap), summed over all disk devices.
+
+@ disk.all.blktotal total (read+write) block operations, summed for all disks
+Cumulative number of disk block read and write operations since system
+boot time (subject to counter wrap), summed over all disk devices.
+
+@ disk.all.read_bytes count of bytes read for all disk devices
+@ disk.all.write_bytes count of bytes written for all disk devices
+@ disk.all.total_bytes total count of bytes read and written for all disk devices
+
+@ disk.partitions.read read operations metric for storage partitions
+Cumulative number of disk read operations since system boot time
+(subject to counter wrap) for individual disk partitions or logical
+volumes.
+
+@ disk.partitions.write write operations metric for storage partitions
+Cumulative number of disk write operations since system boot time
+(subject to counter wrap) for individual disk partitions or logical
+volumes.
+
+@ disk.partitions.total total (read+write) I/O operations metric for storage partitions
+Cumulative number of disk read and write operations since system boot
+time (subject to counter wrap) for individual disk partitions or
+logical volumes.
+
+@ disk.partitions.blkread block read operations metric for storage partitions
+Cumulative number of disk block read operations since system boot time
+(subject to counter wrap) for individual disk partitions or logical
+volumes.
+
+@ disk.partitions.blkwrite block write operations metric for storage partitions
+Cumulative number of disk block write operations since system boot time
+(subject to counter wrap) for individual disk partitions or logical
+volumes.
+
+@ disk.partitions.blktotal total (read+write) block operations metric for storage partitions
+Cumulative number of disk block read and write operations since system
+boot time (subject to counter wrap) for individual disk partitions or
+logical volumes.
+
+@ disk.partitions.read_bytes number of bytes read for storage partitions
+Cumulative number of bytes read since system boot time (subject to
+counter wrap) for individual disk partitions or logical volumes.
+
+@ disk.partitions.write_bytes number of bytes written for storage partitions
+Cumulative number of bytes written since system boot time (subject to
+counter wrap) for individual disk partitions or logical volumes.
+
+@ disk.partitions.total_bytes total number of bytes read and written for storage partitions
+Cumulative number of bytes read and written since system boot time
+(subject to counter wrap) for individual disk partitions or logical
+volumes.
+
+@ swap.pagesin pages read from swap devices due to demand for physical memory
+@ swap.pagesout pages written to swap devices due to demand for physical memory
+@ swap.in number of swap in operations
+@ swap.out number of swap out operations
+@ kernel.all.pswitch context switches metric from /proc/stat
+@ kernel.all.sysfork fork rate metric from /proc/stat
+@ kernel.all.intr interrupt rate metric from /proc/stat
+The value is the first value from the intr field in /proc/stat,
+which is a counter of the total number of interrupts processed.
+The value is normally converted to a rate (count/second).
+This counter usually increases by at least HZ/second,
+i.e. the clock interrupt rate, which is usually 100/second.
+
+See also kernel.percpu.interrupts to get a breakdown
+of interrupt rates by interrupt type and which CPU
+processed each one.
+
+@ mem.physmem total system memory metric reported by /proc/meminfo
+The value of this metric corresponds to the "MemTotal" field
+reported by /proc/meminfo. Note that this does not necessarily
+correspond to actual installed physical memory - there may
+be areas of the physical address space mapped as ROM in
+various peripheral devices and the bios may be mirroring
+certain ROMs in RAM.
+@ mem.freemem free system memory metric from /proc/meminfo
+@ mem.util.used used memory metric from /proc/meminfo
+Used memory is the difference between mem.physmem and mem.freemem.
+@ mem.util.free free memory metric from /proc/meminfo
+Alias for mem.freemem.
+@ mem.util.available available memory from /proc/meminfo
+The amount of memory that is available for a new workload,
+without pushing the system into swap. Estimated from MemFree,
+Active(file), Inactive(file), and SReclaimable, as well as the "low"
+watermarks from /proc/zoneinfo.
+
+@ mem.util.shared shared memory metric from /proc/meminfo
+Shared memory metric. Currently always zero on Linux 2.4 kernels
+and has been removed from 2.6 kernels.
+@ mem.util.bufmem I/O buffers metric from /proc/meminfo
+Memory allocated for buffer_heads.
+@ mem.util.cached page cache metric from /proc/meminfo
+Memory used by the page cache, including buffered file data.
+This is in-memory cache for files read from the disk (the pagecache)
+but doesn't include SwapCached.
+@ mem.util.other unaccounted memory
+Memory that is not free (i.e. has been referenced) and is not cached.
+mem.physmem - mem.util.free - mem.util.cached - mem.util.bufmem
+@ mem.util.active Kbytes on the active page list (recently referenced pages)
+Memory that has been used more recently and usually not reclaimed unless
+absolutely necessary.
+@ mem.util.inactive Kbytes on the inactive page list (candidates for discarding)
+Memory which has been less recently used. It is more eligible to be
+reclaimed for other purposes.
+@ mem.util.swapCached Kbytes in swap cache, from /proc/meminfo
+Memory that once was swapped out, is swapped back in but still also
+is in the swapfile (if memory is needed it doesn't need to be swapped
+out AGAIN because it is already in the swapfile. This saves I/O)
+@ mem.util.highTotal Kbytes in high memory, from /proc/meminfo
+This is apparently an i386 specific metric, and seems to be always zero
+on ia64 architecture (and possibly others). On i386 arch (at least),
+highmem is all memory above ~860MB of physical memory. Highmem areas
+are for use by userspace programs, or for the pagecache. The kernel
+must use tricks to access this memory, making it slower to access
+than lowmem.
+@ mem.util.highFree Kbytes free high memory, from /proc/meminfo
+See mem.util.highTotal. Not used on ia64 arch (and possibly others).
+@ mem.util.lowTotal Kbytes in low memory total, from /proc/meminfo
+Lowmem is memory which can be used for everything that highmem can be
+used for, but it is also available for the kernel's use for its own
+data structures. Among many other things, it is where everything
+from the Slab is allocated. Bad things happen when you're out of lowmem.
+(this may only be true on i386 architectures).
+@ mem.util.lowFree Kbytes free low memory, from /proc/meminfo
+See mem.util.lowTotal
+@ mem.util.swapTotal Kbytes swap, from /proc/meminfo
+total amount of swap space available
+@ mem.util.swapFree Kbytes free swap, from /proc/meminfo
+Memory which has been evicted from RAM, and is temporarily on the disk
+@ mem.util.dirty Kbytes in dirty pages, from /proc/meminfo
+Memory which is waiting to get written back to the disk
+@ mem.util.writeback Kbytes in writeback pages, from /proc/meminfo
+Memory which is actively being written back to the disk
+@ mem.util.mapped Kbytes in mapped pages, from /proc/meminfo
+files which have been mmaped, such as libraries
+@ mem.util.slab Kbytes in slab memory, from /proc/meminfo
+in-kernel data structures cache
+@ mem.util.commitLimit Kbytes limit for address space commit, from /proc/meminfo
+The static total, in Kbytes, available for commitment to address
+spaces. Thus, mem.util.committed_AS may range up to this total. Normally
+the kernel overcommits memory, so this value may exceed mem.physmem
+@ mem.util.committed_AS Kbytes committed to address spaces, from /proc/meminfo
+An estimate of how much RAM you would need to make a 99.99% guarantee
+that there never is OOM (out of memory) for this workload. Normally
+the kernel will overcommit memory. That means, say you do a 1GB malloc,
+nothing happens, really. Only when you start USING that malloc memory
+you will get real memory on demand, and just as much as you use.
+@ mem.util.pageTables Kbytes in kernel page tables, from /proc/meminfo
+@ mem.util.reverseMaps Kbytes in reverse mapped pages, from /proc/meminfo
+@ mem.util.cache_clean Kbytes cached and not dirty or writeback, derived from /proc/meminfo
+@ mem.util.anonpages Kbytes in user pages not backed by files, from /proc/meminfo
+@ mem.util.bounce Kbytes in bounce buffers, from /proc/meminfo
+@ mem.util.NFS_Unstable Kbytes in NFS unstable memory, from /proc/meminfo
+@ mem.util.slabReclaimable Kbytes in reclaimable slab pages, from /proc/meminfo
+@ mem.util.slabUnreclaimable Kbytes in unreclaimable slab pages, from /proc/meminfo
+@ mem.util.active_anon anonymous Active list LRU memory
+@ mem.util.inactive_anon anonymous Inactive list LRU memory
+@ mem.util.active_file file-backed Active list LRU memory
+@ mem.util.inactive_file file-backed Inactive list LRU memory
+@ mem.util.unevictable kbytes of memory that is unevictable
+@ mem.util.mlocked kbytes of memory that is pinned via mlock()
+@ mem.util.shmem kbytes of shmem
+@ mem.util.kernelStack kbytes of memory used for kernel stacks
+@ mem.util.hugepagesTotal a count of total hugepages
+@ mem.util.hugepagesFree a count of free hugepages
+@ mem.util.hugepagesSurp a count of surplus hugepages
+@ mem.util.directMap4k amount of memory that is directly mapped in 4kB pages
+@ mem.util.directMap2M amount of memory that is directly mapped in 2MB pages
+@ mem.util.directMap1G amount of memory that is directly mapped in 1GB pages
+@ mem.util.vmallocTotal amount of kernel memory allocated via vmalloc
+@ mem.util.vmallocUsed amount of used vmalloc memory
+@ mem.util.vmallocChunk amount of vmalloc chunk memory
+@ mem.util.mmap_copy amount of mmap_copy space (non-MMU kernels only)
+@ mem.util.quicklists amount of memory in the per-CPU quicklists
+@ mem.util.corrupthardware amount of memory in hardware corrupted pages
+@ mem.util.anonhugepages amount of memory in anonymous huge pages
+
+User memory (Kbytes) in pages not backed by files, e.g. from malloc()
+@ mem.numa.util.total per-node total memory
+@ mem.numa.util.free per-node free memory
+@ mem.numa.util.used per-node used memory
+@ mem.numa.util.active per-node Active list LRU memory
+@ mem.numa.util.inactive per-node Inactive list LRU memory
+@ mem.numa.util.active_anon per-node anonymous Active list LRU memory
+@ mem.numa.util.inactive_anon per-node anonymous Inactive list LRU memory
+@ mem.numa.util.active_file per-node file-backed Active list LRU memory
+@ mem.numa.util.inactive_file per-node file-backed Inactive list LRU memory
+@ mem.numa.util.highTotal per-node highmem total
+@ mem.numa.util.highFree per-node highmem free
+@ mem.numa.util.lowTotal per-node lowmem total
+@ mem.numa.util.lowFree per-node lowmem free
+@ mem.numa.util.unevictable per-node Unevictable memory
+@ mem.numa.util.mlocked per-node count of Mlocked memory
+@ mem.numa.util.dirty per-node dirty memory
+@ mem.numa.util.writeback per-node count of memory locked for writeback to stable storage
+@ mem.numa.util.filePages per-node count of memory backed by files
+@ mem.numa.util.mapped per-node mapped memory
+@ mem.numa.util.anonpages per-node anonymous memory
+@ mem.numa.util.shmem per-node amount of shared memory
+@ mem.numa.util.kernelStack per-node memory used as kernel stacks
+@ mem.numa.util.pageTables per-node memory used for pagetables
+@ mem.numa.util.NFS_Unstable per-node memory holding NFS data that needs writeback
+@ mem.numa.util.bounce per-node memory used for bounce buffers
+@ mem.numa.util.writebackTmp per-node temporary memory used for writeback
+@ mem.numa.util.slab per-node memory used for slab objects
+@ mem.numa.util.slabReclaimable per-node memory used for slab objects that can be reclaimed
+@ mem.numa.util.slabUnreclaimable per-node memory used for slab objects that is unreclaimable
+@ mem.numa.util.hugepagesTotal per-node total count of hugepages
+@ mem.numa.util.hugepagesFree per-node count of free hugepages
+@ mem.numa.util.hugepagesSurp per-node count of surplus hugepages
+@ mem.numa.alloc.hit per-node count of times a task wanted alloc on local node and succeeded
+@ mem.numa.alloc.miss per-node count of times a task wanted alloc on local node but got another node
+@ mem.numa.alloc.foreign count of times a task on another node alloced on that node, but got this node
+@ mem.numa.alloc.interleave_hit count of times interleaving wanted to allocate on this node and succeeded
+@ mem.numa.alloc.local_node count of times a process ran on this node and got memory on this node
+@ mem.numa.alloc.other_node count of times a process ran on this node and got memory from another node
+@ mem.vmstat.nr_dirty number of pages in dirty state
+Instantaneous number of pages in dirty state, from /proc/vmstat
+@ mem.vmstat.nr_dirtied count of pages dirtied
+Count of pages entering dirty state, from /proc/vmstat
+@ mem.vmstat.nr_writeback number of pages in writeback state
+Instantaneous number of pages in writeback state, from /proc/vmstat
+@ mem.vmstat.nr_unstable number of pages in unstable state
+Instantaneous number of pages in unstable state, from /proc/vmstat
+@ mem.vmstat.nr_page_table_pages number of page table pages
+Instantaneous number of page table pages, from /proc/vmstat
+@ mem.vmstat.nr_mapped number of mapped pagecache pages
+Instantaneous number of mapped pagecache pages, from /proc/vmstat
+See also mem.vmstat.nr_anon for anonymous mapped pages.
+@ mem.vmstat.nr_slab number of slab pages
+Instantaneous number of slab pages, from /proc/vmstat
+This counter was retired in 2.6.18 kernels, and is now the sum of
+mem.vmstat.nr_slab_reclaimable and mem.vmstat.nr_slab_unreclaimable.
+@ mem.vmstat.nr_written count of pages written out
+Count of pages written out, from /proc/vmstat
+@ mem.vmstat.numa_foreign count of foreign NUMA zone allocations
+@ mem.vmstat.numa_hit count of successful allocations from preferred NUMA zone
+@ mem.vmstat.numa_interleave count of interleaved NUMA allocations
+@ mem.vmstat.numa_local count of successful allocations from local NUMA zone
+@ mem.vmstat.numa_miss count of unsuccessful allocations from preferred NUMA zone
+@ mem.vmstat.numa_other count of unsuccessful allocations from local NUMA zone
+@ mem.vmstat.pgpgin page in operations
+Count of page in operations since boot, from /proc/vmstat
+@ mem.vmstat.pgpgout page out operations
+Count of page out operations since boot, from /proc/vmstat
+@ mem.vmstat.pswpin pages swapped in
+Count of pages swapped in since boot, from /proc/vmstat
+@ mem.vmstat.pswpout pages swapped out
+Count of pages swapped out since boot, from /proc/vmstat
+@ mem.vmstat.pgalloc_high high mem page allocations
+Count of high mem page allocations since boot, from /proc/vmstat
+@ mem.vmstat.pgalloc_normal normal mem page allocations
+Count of normal mem page allocations since boot, from /proc/vmstat
+@ mem.vmstat.pgalloc_dma dma mem page allocations
+Count of dma mem page allocations since boot, from /proc/vmstat
+@ mem.vmstat.pgalloc_dma32 dma32 mem page allocations
+Count of dma32 mem page allocations since boot, from /proc/vmstat
+@ mem.vmstat.pgalloc_movable movable mem page allocations
+Count of movable mem page allocations since boot, from /proc/vmstat
+@ mem.vmstat.pgfree page free operations
+Count of page free operations since boot, from /proc/vmstat
+@ mem.vmstat.pgactivate pages moved from inactive to active
+Count of pages moved from inactive to active since boot, from /proc/vmstat
+@ mem.vmstat.pgdeactivate pages moved from active to inactive
+Count of pages moved from active to inactive since boot, from /proc/vmstat
+@ mem.vmstat.pgfault page major and minor fault operations
+Count of page major and minor fault operations since boot, from /proc/vmstat
+@ mem.vmstat.pgmajfault major page fault operations
+Count of major page fault operations since boot, from /proc/vmstat
+@ mem.vmstat.pgrefill_high high mem pages inspected in refill_inactive_zone
+Count of high mem pages inspected in refill_inactive_zone since boot,
+from /proc/vmstat
+@ mem.vmstat.pgrefill_normal normal mem pages inspected in refill_inactive_zone
+Count of normal mem pages inspected in refill_inactive_zone since boot,
+from /proc/vmstat
+@ mem.vmstat.pgrefill_dma dma mem pages inspected in refill_inactive_zone
+Count of dma mem pages inspected in refill_inactive_zone since boot,
+from /proc/vmstat
+@ mem.vmstat.pgrefill_dma32 dma32 mem pages inspected in refill_inactive_zone
+Count of dma32 mem pages inspected in refill_inactive_zone since boot,
+from /proc/vmstat
+@ mem.vmstat.pgrefill_movable movable mem pages inspected in refill_inactive_zone
+Count of movable mem pages inspected in refill_inactive_zone since boot,
+from /proc/vmstat
+@ mem.vmstat.pgsteal_high high mem pages reclaimed
+Count of high mem pages reclaimed since boot, from /proc/vmstat
+@ mem.vmstat.pgsteal_normal normal mem pages reclaimed
+Count of normal mem pages reclaimed since boot, from /proc/vmstat
+@ mem.vmstat.pgsteal_dma dma mem pages reclaimed
+Count of dma mem pages reclaimed since boot, from /proc/vmstat
+@ mem.vmstat.pgsteal_dma32 dma32 mem pages reclaimed
+Count of dma32 mem pages reclaimed since boot, from /proc/vmstat
+@ mem.vmstat.pgsteal_movable movable mem pages reclaimed
+Count of movable mem pages reclaimed since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_kswapd_high high mem pages scanned by kswapd
+Count of high mem pages scanned by kswapd since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_kswapd_normal normal mem pages scanned by kswapd
+Count of normal mem pages scanned by kswapd since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_kswapd_dma dma mem pages scanned by kswapd
+Count of dma mem pages scanned by kswapd since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_kswapd_dma32 dma32 mem pages scanned by kswapd
+Count of dma32 mem pages scanned by kswapd since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_kswapd_movable movable mem pages scanned by kswapd
+Count of movable mem pages scanned by kswapd since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_direct_high high mem pages scanned
+Count of high mem pages scanned since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_direct_normal normal mem pages scanned
+Count of normal mem pages scanned since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_direct_dma dma mem pages scanned
+Count of dma mem pages scanned since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_direct_dma32 dma32 mem pages scanned
+Count of dma32 mem pages scanned since boot, from /proc/vmstat
+@ mem.vmstat.pgscan_direct_movable movable mem pages scanned
+Count of movable mem pages scanned since boot, from /proc/vmstat
+@ mem.vmstat.pginodesteal pages reclaimed via inode freeing
+Count of pages reclaimed via inode freeing since boot, from /proc/vmstat
+@ mem.vmstat.slabs_scanned slab pages scanned
+Count of slab pages scanned since boot, from /proc/vmstat
+@ mem.vmstat.kswapd_steal pages reclaimed by kswapd
+Count of pages reclaimed by kswapd since boot, from /proc/vmstat
+@ mem.vmstat.kswapd_low_wmark_hit_quickly count of times low watermark reached quickly
+Count of times kswapd reached low watermark quickly, from /proc/vmstat
+@ mem.vmstat.kswapd_high_wmark_hit_quickly count of times high watermark reached quickly
+Count of times kswapd reached high watermark quickly, from /proc/vmstat
+@ mem.vmstat.kswapd_skip_congestion_wait count of times kswapd skipped waiting on device congestion
+Count of times kswapd skipped waiting due to device congestion as a
+result of being under the low watermark, from /proc/vmstat
+@ mem.vmstat.kswapd_inodesteal pages reclaimed via kswapd inode freeing
+Count of pages reclaimed via kswapd inode freeing since boot, from
+/proc/vmstat
+@ mem.vmstat.pageoutrun kswapd calls to page reclaim
+Count of kswapd calls to page reclaim since boot, from /proc/vmstat
+@ mem.vmstat.allocstall direct reclaim calls
+Count of direct reclaim calls since boot, from /proc/vmstat
+@ mem.vmstat.pgrotated pages rotated to tail of the LRU
+Count of pages rotated to tail of the LRU since boot, from /proc/vmstat
+@ mem.vmstat.nr_anon_pages number of anonymous mapped pagecache pages
+Instantaneous number of anonymous mapped pagecache pages, from /proc/vmstat
+See also mem.vmstat.mapped for other mapped pages.
+@ mem.vmstat.nr_anon_transparent_hugepages number of anonymous transparent huge pages
+Instantaneous number of anonymous transparent huge pages, from /proc/vmstat
+@ mem.vmstat.nr_bounce number of bounce buffer pages
+Instantaneous number of bounce buffer pages, from /proc/vmstat
+@ mem.vmstat.nr_slab_reclaimable reclaimable slab pages
+Instantaneous number of reclaimable slab pages, from /proc/vmstat.
+@ mem.vmstat.nr_slab_unreclaimable unreclaimable slab pages
+Instantaneous number of unreclaimable slab pages, from /proc/vmstat.
+@ mem.vmstat.nr_vmscan_write pages written by VM scanner from LRU
+Count of pages written from the LRU by the VM scanner, from /proc/vmstat.
+The VM is supposed to minimise the number of pages which get written
+from the LRU (for IO scheduling efficiency, and for high reclaim-success
+rates).
+@ mem.vmstat.htlb_buddy_alloc_fail huge TLB page buddy allocation failures
+Count of huge TLB page buddy allocation failures, from /proc/vmstat
+@ mem.vmstat.htlb_buddy_alloc_success huge TLB page buddy allocation successes
+Count of huge TLB page buddy allocation successes, from /proc/vmstat
+@ mem.vmstat.nr_active_anon number of active anonymous memory pages
+@ mem.vmstat.nr_active_file number of active file memory pages
+@ mem.vmstat.nr_free_pages number of free pages
+@ mem.vmstat.nr_inactive_anon number of inactive anonymous memory pages
+@ mem.vmstat.nr_inactive_file number of inactive file memory pages
+@ mem.vmstat.nr_isolated_anon number of isolated anonymous memory pages
+@ mem.vmstat.nr_isolated_file number of isolated file memory pages
+@ mem.vmstat.nr_kernel_stack number of pages of kernel stack
+@ mem.vmstat.nr_mlock number of pages under mlock
+@ mem.vmstat.nr_shmem number of shared memory pages
+@ mem.vmstat.nr_unevictable number of unevictable pages
+@ mem.vmstat.nr_writeback_temp number of temporary writeback pages
+@ mem.vmstat.compact_blocks_moved count of compact blocks moved
+@ mem.vmstat.compact_fail count of unsuccessful compactions for high order allocations
+@ mem.vmstat.compact_pagemigrate_failed count of pages unsuccessfully compacted
+@ mem.vmstat.compact_pages_moved count of pages successfully moved for compaction
+@ mem.vmstat.compact_stall count of failures to even start compacting
+@ mem.vmstat.compact_success count of successful compactions for high order allocations
+@ mem.vmstat.thp_fault_alloc transparent huge page fault allocations
+@ mem.vmstat.thp_fault_fallback transparent huge page fault fallbacks
+@ mem.vmstat.thp_collapse_alloc transparent huge page collapse allocations
+@ mem.vmstat.thp_collapse_alloc_failed transparent huge page collapse failures
+@ mem.vmstat.thp_split count of transparent huge page splits
+@ mem.vmstat.unevictable_pgs_cleared count of unevictable pages cleared
+@ mem.vmstat.unevictable_pgs_culled count of unevictable pages culled
+@ mem.vmstat.unevictable_pgs_mlocked count of mlocked unevictable pages
+@ mem.vmstat.unevictable_pgs_mlockfreed count of unevictable pages mlock freed
+@ mem.vmstat.unevictable_pgs_munlocked count of unevictable pages munlocked
+@ mem.vmstat.unevictable_pgs_rescued count of unevictable pages rescued
+@ mem.vmstat.unevictable_pgs_scanned count of unevictable pages scanned
+@ mem.vmstat.unevictable_pgs_stranded count of unevictable pages stranded
+@ mem.vmstat.zone_reclaim_failed number of zone reclaim failures
+
+
+@ swap.length total swap available metric from /proc/meminfo
+@ swap.used swap used metric from /proc/meminfo
+@ swap.free swap free metric from /proc/meminfo
+@ kernel.all.load 1, 5 and 15 minute load average
+@ kernel.all.cpu.user total user CPU time from /proc/stat for all CPUs, including guest CPU time
+@ kernel.all.cpu.vuser total user CPU time from /proc/stat for all CPUs, excluding guest CPU time
+@ kernel.all.cpu.intr total interrupt CPU time from /proc/stat for all CPUs
+Total time spent processing interrupts on all CPUs.
+This value includes both soft and hard interrupt processing time.
+@ kernel.all.cpu.wait.total total wait CPU time from /proc/stat for all CPUs
+@ kernel.all.cpu.nice total nice user CPU time from /proc/stat for all CPUs
+@ kernel.all.cpu.sys total sys CPU time from /proc/stat for all CPUs
+@ kernel.all.cpu.idle total idle CPU time from /proc/stat for all CPUs
+@ kernel.all.cpu.irq.soft soft interrupt CPU time from /proc/stat for all CPUs
+Total soft interrupt CPU time (deferred interrupt handling code,
+not run in the initial interrupt handler).
+@ kernel.all.cpu.irq.hard hard interrupt CPU time from /proc/stat for all CPUs
+Total hard interrupt CPU time ("hard" interrupt handling code
+is the code run directly on receipt of the initial hardware
+interrupt, and does not include "soft" interrupt handling code
+which is deferred until later).
+@ kernel.all.cpu.steal total virtualisation CPU steal time for all CPUs
+Total CPU time when a CPU had a runnable process, but the hypervisor
+(virtualisation layer) chose to run something else instead.
+@ kernel.all.cpu.guest total virtual guest CPU time for all CPUs
+Total CPU time spent running virtual guest operating systems.
+@ kernel.all.nusers number of user sessions on system
+
+@ hinv.ninterface number of active (up) network interfaces
+@ network.interface.in.bytes network recv read bytes from /proc/net/dev per network interface
+@ network.interface.in.packets network recv read packets from /proc/net/dev per network interface
+@ network.interface.in.errors network recv read errors from /proc/net/dev per network interface
+@ network.interface.in.drops network recv read drops from /proc/net/dev per network interface
+@ network.interface.in.mcasts network recv multicast packets from /proc/net/dev per network interface
+@ network.interface.in.fifo network recv read fifos from /proc/net/dev per network interface
+@ network.interface.in.frame network recv read frames from /proc/net/dev per network interface
+@ network.interface.in.compressed network recv compressed from /proc/net/dev per network interface
+@ network.interface.out.bytes network send bytes from /proc/net/dev per network interface
+@ network.interface.out.packets network send packets from /proc/net/dev per network interface
+@ network.interface.out.errors network send errors from /proc/net/dev per network interface
+@ network.interface.out.drops network send drops from /proc/net/dev per network interface
+@ network.interface.out.fifo network send fifos from /proc/net/dev per network interface
+@ network.interface.collisions network send collisions from /proc/net/dev per network interface
+@ network.interface.out.carrier network send carrier from /proc/net/dev per network interface
+@ network.interface.out.compressed network send compressed from /proc/net/dev per network interface
+@ network.interface.total.bytes network total (in+out) bytes from /proc/net/dev per network interface
+@ network.interface.total.packets network total (in+out) packets from /proc/net/dev per network interface
+@ network.interface.total.errors network total (in+out) errors from /proc/net/dev per network interface
+@ network.interface.total.drops network total (in+out) drops from /proc/net/dev per network interface
+@ network.interface.total.mcasts network total (in+out) mcasts from /proc/net/dev per network interface
+@ network.interface.mtu maximum transmission unit on network interface
+@ network.interface.speed interface speed in megabytes per second
+The linespeed on the network interface, as reported by the kernel,
+scaled from Megabits/second to Megabytes/second.
+See also network.interface.baudrate for the bytes/second value.
+@ network.interface.baudrate interface speed in bytes per second
+The linespeed on the network interface, as reported by the kernel,
+scaled up from Megabits/second to bits/second and divided by 8 to convert
+to bytes/second.
+See also network.interface.speed for the Megabytes/second value.
+@ network.interface.duplex value one for half or two for full duplex interface
+@ network.interface.up boolean for whether interface is currently up or down
+@ network.interface.running boolean for whether interface has resources allocated
+@ network.interface.inet_addr string INET interface address (ifconfig style)
+@ network.interface.ipv6_addr string IPv6 interface address (ifconfig style)
+@ network.interface.ipv6_scope string IPv6 interface scope (ifconfig style)
+@ network.interface.hw_addr hardware address (from sysfs)
+@ network.sockstat.tcp.inuse instantaneous number of tcp sockets currently in use
+@ network.sockstat.tcp.highest highest number of tcp sockets in use at any one time since boot
+@ network.sockstat.tcp.util instantaneous tcp socket utilization (100 * inuse/highest)
+@ network.sockstat.udp.inuse instantaneous number of udp sockets currently in use
+@ network.sockstat.udp.highest highest number of udp sockets in use at any one time since boot
+@ network.sockstat.udp.util instantaneous udp socket utilization (100 * inuse/highest)
+@ network.sockstat.raw.inuse instantaneous number of raw sockets currently in use
+@ network.sockstat.raw.highest highest number of raw sockets in use at any one time since boot
+@ network.sockstat.raw.util instantaneous raw socket utilization (100 * inuse/highest)
+@ hinv.physmem total system memory metric from /proc/meminfo
+@ hinv.pagesize Memory page size
+The memory page size of the running kernel in bytes.
+@ hinv.ncpu number of CPUs in the system
+@ hinv.ndisk number of disks in the system
+@ hinv.nfilesys number of (local) file systems currently mounted
+@ hinv.nnode number of NUMA nodes in the system
+@ hinv.map.scsi list of active SCSI devices
+There is one string value for each SCSI device active in the system,
+as extracted from /proc/scsi/scsi. The external instance name
+for each device is in the format scsiD:C:I:L where
+D is controller number, C is channel number, I is device ID
+and L is the SCSI LUN number for the device. The values for this
+metric are the actual device names (sd[a-z] are SCSI disks, st[0-9]
+are SCSI tapes and scd[0-9] are SCSI CD-ROMs).
+@ hinv.nlv number of logical volumes
+@ hinv.map.lvname mapping of logical volume names for devices
+Provides a logical-volume-name to device-name mapping for the device
+mapper subsystem.
+@ filesys.capacity Total capacity of mounted filesystem (Kbytes)
+@ filesys.used Total space used on mounted filesystem (Kbytes)
+@ filesys.free Total space free on mounted filesystem (Kbytes)
+@ filesys.maxfiles Inodes capacity of mounted filesystem
+@ filesys.usedfiles Number of inodes allocated on mounted filesystem
+@ filesys.freefiles Number of unallocated inodes on mounted filesystem
+@ filesys.mountdir File system mount point
+@ filesys.full Percentage of filesystem in use
+@ filesys.blocksize Size of each block on mounted filesystem (Bytes)
+@ filesys.avail Total space free to non-superusers on mounted filesystem (Kbytes)
+@ filesys.readonly Indicates whether a filesystem is mounted readonly
+@ tmpfs.capacity Total capacity of mounted tmpfs filesystem (Kbytes)
+@ tmpfs.used Total space used on mounted tmpfs filesystem (Kbytes)
+@ tmpfs.free Total space free on mounted tmpfs filesystem (Kbytes)
+@ tmpfs.maxfiles Inodes capacity of mounted tmpfs filesystem
+@ tmpfs.usedfiles Number of inodes allocated on mounted tmpfs filesystem
+@ tmpfs.freefiles Number of unallocated inodes on mounted tmpfs filesystem
+@ tmpfs.full Percentage of tmpfs filesystem in use
+@ swapdev.free physical swap free space
+@ swapdev.length physical swap size
+@ swapdev.maxswap maximum swap length (same as swapdev.length on Linux)
+@ swapdev.vlength virtual swap size (always zero on Linux)
+Virtual swap size (always zero on Linux since Linux does not support
+virtual swap).
+
+This metric is retained on Linux for interoperability with PCP monitor
+tools running on IRIX.
+
+@ swapdev.priority swap resource priority
+@ nfs.client.calls cumulative total of client NFSv2 requests
+@ nfs.client.reqs cumulative total of client NFSv2 requests by request type
+@ nfs.server.calls cumulative total of server NFSv2 requests
+@ nfs.server.reqs cumulative total of server NFSv2 requests by request type
+@ nfs3.client.calls cumulative total of client NFSv3 requests
+@ nfs3.client.reqs cumulative total of client NFSv3 requests by request type
+@ nfs3.server.calls cumulative total of server NFSv3 requests
+@ nfs3.server.reqs cumulative total of server NFSv3 requests by request type
+@ nfs4.client.calls cumulative total of client NFSv4 requests
+@ nfs4.client.reqs cumulative total for each client NFSv4 request type
+@ nfs4.server.calls cumulative total of server NFSv4 operations, plus NULL requests
+@ nfs4.server.reqs cumulative total for each server NFSv4 operation, and for NULL requests
+@ rpc.client.rpccnt cumulative total of client RPC requests
+@ rpc.client.rpcretrans cumulative total of client RPC retransmissions
+@ rpc.client.rpcauthrefresh cumulative total of client RPC auth refreshes
+@ rpc.client.netcnt cumulative total of client RPC network layer requests
+@ rpc.client.netudpcnt cumulative total of client RPC UDP network layer requests
+@ rpc.client.nettcpcnt cumulative total of client RPC TCP network layer requests
+@ rpc.client.nettcpconn cumulative total of client RPC TCP network layer connection requests
+@ rpc.server.rpccnt cumulative total of server RPC requests
+@ rpc.server.rpcerr cumulative total of server RPC errors
+@ rpc.server.rpcbadfmt cumulative total of server RPC bad format errors
+@ rpc.server.rpcbadauth cumulative total of server RPC bad auth errors
+@ rpc.server.rpcbadclnt cumulative total of server RPC bad client errors
+@ rpc.server.rchits cumulative total of request-reply-cache hits
+@ rpc.server.rcmisses cumulative total of request-reply-cache misses
+@ rpc.server.rcnocache cumulative total of uncached request-reply-cache requests
+@ rpc.server.fh_cached cumulative total of file handle cache requests
+@ rpc.server.fh_valid cumulative total of file handle cache validations
+@ rpc.server.fh_fixup cumulative total of file handle cache fixup validations
+@ rpc.server.fh_lookup cumulative total of file handle cache new lookups
+@ rpc.server.fh_stale cumulative total of stale file handle cache errors
+@ rpc.server.fh_concurrent cumulative total of concurrent file handle cache requests
+@ rpc.server.netcnt cumulative total of server RPC network layer requests
+@ rpc.server.netudpcnt cumulative total of server RPC UDP network layer requests
+@ rpc.server.nettcpcnt cumulative total of server RPC TCP network layer requests
+@ rpc.server.nettcpconn cumulative total of server RPC TCP network layer connection requests
+@ rpc.server.fh_anon cumulative total anonymous file dentries returned
+@ rpc.server.fh_nocache_dir count of directory file handles not found cached
+@ rpc.server.fh_nocache_nondir count of non-directory file handles not found cached
+@ rpc.server.io_read cumulative count of bytes returned from read requests
+@ rpc.server.io_write cumulative count of bytes passed into write requests
+@ rpc.server.th_cnt available nfsd threads
+@ rpc.server.th_fullcnt number of times the last free nfsd thread was used
+
+@ network.ip.forwarding count of ip forwarding
+@ network.ip.defaultttl count of ip defaultttl
+@ network.ip.inreceives count of ip inreceives
+@ network.ip.inhdrerrors count of ip inhdrerrors
+@ network.ip.inaddrerrors count of ip inaddrerrors
+@ network.ip.forwdatagrams count of ip forwdatagrams
+@ network.ip.inunknownprotos count of ip inunknownprotos
+@ network.ip.indiscards count of ip indiscards
+@ network.ip.indelivers count of ip indelivers
+@ network.ip.outrequests count of ip outrequests
+@ network.ip.outdiscards count of ip outdiscards
+@ network.ip.outnoroutes count of ip outnoroutes
+@ network.ip.reasmtimeout count of ip reasmtimeout
+@ network.ip.reasmreqds count of ip reasmreqds
+@ network.ip.reasmoks count of ip reasmoks
+@ network.ip.reasmfails count of ip reasmfails
+@ network.ip.fragoks count of ip fragoks
+@ network.ip.fragfails count of ip fragfails
+@ network.ip.fragcreates count of ip fragcreates
+@ network.icmp.inmsgs count of icmp inmsgs
+@ network.icmp.inerrors count of icmp inerrors
+@ network.icmp.indestunreachs count of icmp indestunreachs
+@ network.icmp.intimeexcds count of icmp intimeexcds
+@ network.icmp.inparmprobs count of icmp inparmprobs
+@ network.icmp.insrcquenchs count of icmp insrcquenchs
+@ network.icmp.inredirects count of icmp inredirects
+@ network.icmp.inechos count of icmp inechos
+@ network.icmp.inechoreps count of icmp inechoreps
+@ network.icmp.intimestamps count of icmp intimestamps
+@ network.icmp.intimestampreps count of icmp intimestampreps
+@ network.icmp.inaddrmasks count of icmp inaddrmasks
+@ network.icmp.inaddrmaskreps count of icmp inaddrmaskreps
+@ network.icmp.outmsgs count of icmp outmsgs
+@ network.icmp.outerrors count of icmp outerrors
+@ network.icmp.outdestunreachs count of icmp outdestunreachs
+@ network.icmp.outtimeexcds count of icmp outtimeexcds
+@ network.icmp.outparmprobs count of icmp outparmprobs
+@ network.icmp.outsrcquenchs count of icmp outsrcquenchs
+@ network.icmp.outredirects count of icmp outredirects
+@ network.icmp.outechos count of icmp outechos
+@ network.icmp.outechoreps count of icmp outechoreps
+@ network.icmp.outtimestamps count of icmp outtimestamps
+@ network.icmp.outtimestampreps count of icmp outtimestampreps
+@ network.icmp.outaddrmasks count of icmp outaddrmasks
+@ network.icmp.outaddrmaskreps count of icmp outaddrmaskreps
+@ network.icmp.incsumerrors count of icmp in checksum errors
+@ network.icmpmsg.intype count of icmp message types recvd
+@ network.icmpmsg.outtype count of icmp message types sent
+@ network.tcp.rtoalgorithm count of tcp rtoalgorithm
+@ network.tcp.rtomin count of tcp rtomin
+@ network.tcp.rtomax count of tcp rtomax
+@ network.tcp.maxconn count of tcp maxconn
+@ network.tcp.activeopens count of tcp activeopens
+@ network.tcp.passiveopens count of tcp passiveopens
+@ network.tcp.attemptfails count of tcp attemptfails
+@ network.tcp.estabresets count of tcp estabresets
+@ network.tcp.currestab count of tcp currestab
+@ network.tcp.insegs count of tcp insegs
+@ network.tcp.outsegs count of tcp outsegs
+@ network.tcp.retranssegs count of tcp retranssegs
+@ network.tcp.inerrs count of tcp inerrs
+@ network.tcp.outrsts count of tcp outrsts
+@ network.tcp.incsumerrors count of tcp in checksum errors
+@ network.tcpconn.established Number of established connections
+@ network.tcpconn.syn_sent Number of SYN_SENT connections
+@ network.tcpconn.syn_recv Number of SYN_RECV connections
+@ network.tcpconn.fin_wait1 Number of FIN_WAIT1 connections
+@ network.tcpconn.fin_wait2 Number of FIN_WAIT2 connections
+@ network.tcpconn.time_wait Number of TIME_WAIT connections
+@ network.tcpconn.close Number of CLOSE connections
+@ network.tcpconn.close_wait Number of CLOSE_WAIT connections
+@ network.tcpconn.last_ack Number of LAST_ACK connections
+@ network.tcpconn.listen Number of LISTEN connections
+@ network.tcpconn.closing Number of CLOSING connections
+@ network.udp.indatagrams count of udp indatagrams
+@ network.udp.noports count of udp noports
+@ network.udp.inerrors count of udp inerrors
+@ network.udp.outdatagrams count of udp outdatagrams
+@ network.udp.recvbuferrors count of udp receive buffer errors
+@ network.udp.sndbuferrors count of udp send buffer errors
+@ network.udp.incsumerrors count of udp in checksum errors
+@ network.udplite.indatagrams count of udplite indatagrams
+@ network.udplite.noports count of udplite noports
+@ network.udplite.inerrors count of udplite inerrors
+@ network.udplite.outdatagrams count of udplite outdatagrams
+@ network.udplite.recvbuferrors count of udplite receive buffer errors
+@ network.udplite.sndbuferrors count of udplite send buffer errors
+@ network.udplite.incsumerrors count of udplite in checksum errors
+
+@ network.ip.innoroutes Number of IP datagrams discarded due to no routes in forwarding path
+@ network.ip.intruncatedpkts Number of IP datagrams discarded due to frame not carrying enough data
+@ network.ip.inmcastpkts Number of received IP multicast datagrams
+@ network.ip.outmcastpkts Number of sent IP multicast datagrams
+@ network.ip.inbcastpkts Number of received IP broadcast datagrams
+@ network.ip.outbcastpkts Number of sent IP broadcast datagrams
+@ network.ip.inoctets Number of received octets
+@ network.ip.outoctets Number of sent octets
+@ network.ip.inmcastoctets Number of received IP multicast octets
+@ network.ip.outmcastoctets Number of sent IP multicast octets
+@ network.ip.inbcastoctets Number of received IP broadcast octets
+@ network.ip.outbcastoctets Number of sent IP broadcast octets
+@ network.ip.csumerrors Number of IP datagrams with checksum errors
+@ network.ip.noectpkts Number of packets received with NOECT
+@ network.ip.ect1pkts Number of packets received with ECT(1)
+@ network.ip.ect0pkts Number of packets received with ECT(0)
+@ network.ip.cepkts Number of packets received with Congestion Experienced (CE) code point set
+
+@ network.tcp.syncookiessent Number of sent SYN cookies
+@ network.tcp.syncookiesrecv Number of received SYN cookies
+@ network.tcp.syncookiesfailed Number of failed SYN cookies
+@ network.tcp.embryonicrsts Number of resets received for embryonic SYN_RECV sockets
+@ network.tcp.prunecalled Number of packets pruned from receive queue because of socket buffer overrun
+@ network.tcp.rcvpruned Number of packets pruned from receive queue
+@ network.tcp.ofopruned Number of packets dropped from out-of-order queue because of socket buffer overrun
+@ network.tcp.outofwindowicmps Number of dropped out of window ICMPs
+@ network.tcp.lockdroppedicmps Number of dropped ICMP because socket was locked
+@ network.tcp.arpfilter Number of arp packets filtered
+@ network.tcp.timewaited Number of TCP sockets finished time wait in fast timer
+@ network.tcp.timewaitrecycled Number of time wait sockets recycled by time stamp
+@ network.tcp.timewaitkilled Number of TCP sockets finished time wait in slow timer
+@ network.tcp.pawspassiverejected Number of passive connections rejected because of timestamp
+@ network.tcp.pawsactiverejected Number of active connections rejected because of timestamp
+@ network.tcp.pawsestabrejected Number of packets rejects in established connections because of timestamp
+@ network.tcp.delayedacks Number of delayed acks sent
+@ network.tcp.delayedacklocked Number of delayed acks further delayed because of locked socket
+@ network.tcp.delayedacklost Number of times quick ack mode was activated
+@ network.tcp.listenoverflows Number of times the listen queue of a socket overflowed
+@ network.tcp.listendrops Number of SYNs to LISTEN sockets dropped
+@ network.tcp.prequeued Number of packets directly queued to recvmsg prequeue
+@ network.tcp.directcopyfrombacklog Number of bytes directly in process context from backlog
+@ network.tcp.directcopyfromprequeue Number of bytes directly received in process context from prequeue
+@ network.tcp.prequeueddropped Number of packets dropped from prequeue
+@ network.tcp.hphits Number of packet headers predicted
+@ network.tcp.hphitstouser Number of packets header predicted and directly queued to user
+@ network.tcp.pureacks Number of acknowledgments not containing data payload received
+@ network.tcp.hpacks Number of predicted acknowledgments
+@ network.tcp.renorecovery Number of times recovered from packet loss due to fast retransmit
+@ network.tcp.sackrecovery Number of times recovered from packet loss by selective acknowledgements
+@ network.tcp.sackreneging Number of bad SACK blocks received
+@ network.tcp.fackreorder Number of times detected reordering using FACK
+@ network.tcp.sackreorder Number of times detected reordering using SACK
+@ network.tcp.renoreorder Number of times detected reordering using reno fast retransmit
+@ network.tcp.tsreorder Number of times detected reordering times using time stamp
+@ network.tcp.fullundo Number of congestion windows fully recovered without slow start
+@ network.tcp.partialundo Number of congestion windows partially recovered using Hoe heuristic
+@ network.tcp.dsackundo Number of congestion windows recovered without slow start using DSACK
+@ network.tcp.lossundo Number of congestion windows recovered without slow start after partial ack
+@ network.tcp.lostretransmit Number of retransmits lost
+@ network.tcp.renofailures Number of timeouts after reno fast retransmit
+@ network.tcp.sackfailures Number of timeouts after SACK recovery
+@ network.tcp.lossfailures Number of timeouts in loss state
+@ network.tcp.fastretrans Number of fast retransmits
+@ network.tcp.forwardretrans Number of forward retransmits
+@ network.tcp.slowstartretrans Number of retransmits in slow start
+@ network.tcp.timeouts Number of other TCP timeouts
+@ network.tcp.lossprobes Number of sent TCP loss probes
+@ network.tcp.lossproberecovery Number of TCP loss probe recoveries
+@ network.tcp.renorecoveryfail Number of reno fast retransmits failed
+@ network.tcp.sackrecoveryfail Number of SACK retransmits failed
+@ network.tcp.schedulerfail Number of times receiver scheduled too late for direct processing
+@ network.tcp.rcvcollapsed Number of packets collapsed in receive queue due to low socket buffer
+@ network.tcp.dsackoldsent Number of DSACKs sent for old packets
+@ network.tcp.dsackofosent Number of DSACKs sent for out of order packets
+@ network.tcp.dsackrecv Number of DSACKs received
+@ network.tcp.dsackoforecv Number of DSACKs for out of order packets received
+@ network.tcp.abortondata Number of connections reset due to unexpected data
+@ network.tcp.abortonclose Number of connections reset due to early user close
+@ network.tcp.abortonmemory Number of connections aborted due to memory pressure
+@ network.tcp.abortontimeout Number of connections aborted due to timeout
+@ network.tcp.abortonlinger Number of connections aborted after user close in linger timeout
+@ network.tcp.abortfailed Number of times unable to send RST due to no memory
+@ network.tcp.memorypressures Number of times TCP ran low on memory
+@ network.tcp.sackdiscard Number of SACKs discarded
+@ network.tcp.dsackignoredold Number of ignored old duplicate SACKs
+@ network.tcp.dsackignorednoundo Number of ignored duplicate SACKs with undo_marker not set
+@ network.tcp.spuriousrtos Number of FRTO's successfully detected spurious RTOs
+@ network.tcp.md5notfound Number of times MD5 hash expected but not found
+@ network.tcp.md5unexpected Number of times MD5 hash unexpected but found
+@ network.tcp.sackshifted Number of SACKs shifted
+@ network.tcp.sackmerged Number of SACKs merged
+@ network.tcp.sackshiftfallback Number of SACKs fallbacks
+@ network.tcp.backlogdrop Number of frames dropped because of full backlog queue
+@ network.tcp.minttldrop Number of frames dropped when TTL is under the minimum
+@ network.tcp.deferacceptdrop Number of dropped ACK frames when socket is in SYN-RECV state
+Due to SYNACK retrans count lower than defer_accept value
+
+@ network.tcp.iprpfilter Number of packets dropped in input path because of rp_filter settings
+@ network.tcp.timewaitoverflow Number of occurrences of time wait bucket overflow
+@ network.tcp.reqqfulldocookies Number of times a SYNCOOKIE was replied to client
+@ network.tcp.reqqfulldrop Number of times a SYN request was dropped due to disabled syncookies
+@ network.tcp.retransfail Number of failed tcp_retransmit_skb() calls
+@ network.tcp.rcvcoalesce Number of times tried to coalesce the receive queue
+@ network.tcp.ofoqueue Number of packets queued in OFO queue
+@ network.tcp.ofodrop Number of packets meant to be queued in OFO but dropped due to limits hit
+Number of packets meant to be queued in OFO but dropped because socket rcvbuf
+limit reached.
+@ network.tcp.ofomerge Number of packets in OFO that were merged with other packets
+@ network.tcp.challengeack Number of challenge ACKs sent (RFC 5961 3.2)
+@ network.tcp.synchallenge Number of challenge ACKs sent in response to SYN packets
+@ network.tcp.fastopenactive Number of successful active fast opens
+@ network.tcp.fastopenactivefail Number of fast open attempts failed due to remote not accepting it or time outs
+@ network.tcp.fastopenpassive Number of successful passive fast opens
+@ network.tcp.fastopenpassivefail Number of passive fast open attempts failed
+@ network.tcp.fastopenlistenoverflow Number of times the fastopen listen queue overflowed
+@ network.tcp.fastopencookiereqd Number of fast open cookies requested
+@ network.tcp.spuriousrtxhostqueues Number of times that the fast clone is not yet freed in tcp_transmit_skb()
+@ network.tcp.busypollrxpackets Number of low latency application-fetched packets
+@ network.tcp.autocorking Number of times stack detected skb was underused and its flush was deferred
+@ network.tcp.fromzerowindowadv Number of times window went from zero to non-zero
+@ network.tcp.tozerowindowadv Number of times window went from non-zero to zero
+@ network.tcp.wantzerowindowadv Number of times zero window announced
+@ network.tcp.synretrans Number of SYN-SYN/ACK retransmits
+Number of SYN-SYN/ACK retransmits to break down retransmissions in SYN, fast/timeout
+retransmits.
+@ network.tcp.origdatasent Number of outgoing packets with original data
+Excluding retransmission but including data-in-SYN). This counter is different from
+TcpOutSegs because TcpOutSegs also tracks pure ACKs. TCPOrigDataSent is
+more useful to track the TCP retransmission rate.
+
+@ pmda.uname identity and type of current system
+Identity and type of current system. The concatenation of the values
+returned from utsname(2), also similar to uname -a.
+
+See also the kernel.uname.* metrics
+
+@ pmda.version build version of Linux PMDA
+@ hinv.map.cpu_num logical to physical CPU mapping for each CPU
+@ hinv.map.cpu_node logical CPU to NUMA node mapping for each CPU
+@ hinv.machine machine name, IP35 if SGI SNIA, else simply linux
+@ hinv.cpu.clock clock rate in Mhz for each CPU as reported by /proc/cpuinfo
+@ hinv.cpu.vendor manufacturer of each CPU as reported by /proc/cpuinfo
+@ hinv.cpu.model model number of each CPU as reported by /proc/cpuinfo
+@ hinv.cpu.model_name model name of each CPU as reported by /proc/cpuinfo
+@ hinv.cpu.stepping stepping of each CPU as reported by /proc/cpuinfo
+@ hinv.cpu.cache primary cache size of each CPU as reported by /proc/cpuinfo
+@ hinv.cpu.bogomips bogo mips rating for each CPU as reported by /proc/cpuinfo
+@ kernel.all.hz value of HZ (jiffies/second) for the currently running kernel
+@ kernel.all.uptime time the current kernel has been running
+@ kernel.all.idletime time the current kernel has been idle since boot
+@ kernel.all.lastpid most recently allocated process id
+@ kernel.all.runnable total number of processes in the (per-CPU) run queues
+@ kernel.all.nprocs total number of processes (lightweight)
+@ mem.slabinfo.objects.active number of active objects in each cache
+@ mem.slabinfo.objects.total total number of objects in each cache
+@ mem.slabinfo.objects.size size of individual objects of each cache
+@ mem.slabinfo.slabs.active number of active slabs comprising each cache
+@ mem.slabinfo.slabs.total total number of slabs comprising each cache
+@ mem.slabinfo.slabs.pages_per_slab number of pages in each slab
+@ mem.slabinfo.slabs.objects_per_slab number of objects in each slab
+@ mem.slabinfo.slabs.total_size total number of bytes allocated for active objects in each slab
+@ ipc.sem.max_semmap maximum number of entries in a semaphore map (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_semid maximum number of semaphore identifiers (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_sem maximum number of semaphores in system (from semctl(..,IPC_INFO,..))
+@ ipc.sem.num_undo number of undo structures in system (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_perid maximum number of semaphores per identifier (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_ops maximum number of operations per semop call (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_undoent maximum number of undo entries per process (from semctl(..,IPC_INFO,..))
+@ ipc.sem.sz_semundo size of struct sem_undo (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_semval semaphore maximum value (from semctl(..,IPC_INFO,..))
+@ ipc.sem.max_exit adjust on exit maximum value (from semctl(..,IPC_INFO,..))
+@ ipc.msg.sz_pool size of message pool in kilobytes (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.mapent number of entries in a message map (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.max_msgsz maximum size of a message in bytes (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.max_defmsgq default maximum size of a message queue (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.max_msgqid maximum number of message queue identifiers (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.max_msgseg message segment size (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.num_smsghdr number of system message headers (from msgctl(..,IPC_INFO,..))
+@ ipc.msg.max_seg maximum number of message segments (from msgctl(..,IPC_INFO,..))
+@ ipc.shm.max_segsz maximum shared segment size in bytes (from shmctl(..,IPC_INFO,..))
+@ ipc.shm.min_segsz minimum shared segment size in bytes (from shmctl(..,IPC_INFO,..))
+@ ipc.shm.max_seg maximum number of shared segments in system (from shmctl(..,IPC_INFO,..))
+@ ipc.shm.max_segproc maximum number of shared segments per process (from shmctl(..,IPC_INFO,..))
+@ ipc.shm.max_shmsys maximum amount of shared memory in system in pages (from shmctl(..,IPC_INFO,..))
+
+@ vfs.files.count number of in-use file structures
+@ vfs.files.free number of available file structures
+@ vfs.files.max hard maximum on number of file structures
+@ vfs.inodes.count number of in-use inode structures
+@ vfs.inodes.free number of available inode structures
+@ vfs.dentry.count number of in-use dentry structures
+@ vfs.dentry.free number of available dentry structures
+
+@ sysfs.kernel.uevent_seqnum counter of the number of uevents processed by the udev subsystem
diff --git a/src/pmdas/linux/indom.h b/src/pmdas/linux/indom.h
new file mode 100644
index 0000000..89f5236
--- /dev/null
+++ b/src/pmdas/linux/indom.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 2010 Aconex. All Rights Reserved.
+ * Copyright (c) 2005,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _INDOM_H
+#define _INDOM_H
+
+/* Instance domain identifiers; the ordinal values are part of the PMDA ABI. */
+enum {
+ CPU_INDOM = 0, /* 0 - percpu */
+ DISK_INDOM, /* 1 - disks */
+ LOADAVG_INDOM, /* 2 - 1, 5, 15 minute load averages */
+ NET_DEV_INDOM, /* 3 - network interfaces */
+ PROC_INTERRUPTS_INDOM, /* 4 - interrupt lines -> proc PMDA */
+ FILESYS_INDOM, /* 5 - mounted bdev filesystems */
+ SWAPDEV_INDOM, /* 6 - swap devices */
+ NFS_INDOM, /* 7 - nfs operations */
+ NFS3_INDOM, /* 8 - nfs v3 operations */
+ PROC_PROC_INDOM, /* 9 - processes */
+ PARTITIONS_INDOM, /* 10 - disk partitions */
+ SCSI_INDOM, /* 11 - scsi devices */
+ SLAB_INDOM, /* 12 - kernel slabs */
+ STRINGS_INDOM, /* 13 - string dictionary */
+ NFS4_CLI_INDOM, /* 14 - nfs v4 client operations */
+ NFS4_SVR_INDOM, /* 15 - nfs v4 server operations */
+ QUOTA_PRJ_INDOM, /* 16 - project quota -> xfs PMDA */
+ NET_ADDR_INDOM, /* 17 - inet/ipv6 addresses */
+ TMPFS_INDOM, /* 18 - tmpfs mounts */
+ NODE_INDOM, /* 19 - NUMA nodes */
+ PROC_CGROUP_SUBSYS_INDOM, /* 20 - control group subsystems -> proc PMDA */
+ PROC_CGROUP_MOUNTS_INDOM, /* 21 - control group mounts -> proc PMDA */
+ LV_INDOM, /* 22 - lvm devices */
+ DM_INDOM, /* 24 - device mapper devices */
+
+ NUM_INDOMS /* one more than highest numbered indom */
+};
+
+extern pmInDom linux_indom(int);
+#define INDOM(i) linux_indom(i)
+
+extern pmdaIndom *linux_pmda_indom(int);
+#define PMDAINDOM(i) linux_pmda_indom(i)
+
+/*
+ * Optional path prefix for all stats files, used for testing.
+ */
+extern char *linux_statspath;
+extern FILE *linux_statsfile(const char *, char *, int);
+
+/*
+ * static string dictionary - one copy of oft-repeated strings;
+ * implemented using STRINGS_INDOM and pmdaCache(3) routines.
+ */
+char *linux_strings_lookup(int);
+int linux_strings_insert(const char *);
+
+#endif /* _INDOM_H */
diff --git a/src/pmdas/linux/interrupts.c b/src/pmdas/linux/interrupts.c
new file mode 100644
index 0000000..47377e8
--- /dev/null
+++ b/src/pmdas/linux/interrupts.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2012-2014 Red Hat.
+ * Copyright (c) 2011 Aconex. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "filesys.h"
+#include "clusters.h"
+#include "interrupts.h"
+#include <sys/stat.h>
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+#include <ctype.h>
+
+/* One dynamic per-CPU interrupt counter (a row from /proc/interrupts). */
+typedef struct {
+ unsigned int id; /* becomes PMID item number */
+ char *name; /* becomes PMNS sub-component */
+ char *text; /* one-line metric help text */
+ unsigned long long *values; /* per-CPU values for this counter */
+} interrupt_t;
+
+static unsigned int cpu_count; /* number of configured CPUs */
+static int *online_cpumap; /* maps input columns to CPU IDs */
+static unsigned int lines_count; /* numbered interrupt line counters */
+static interrupt_t *interrupt_lines;
+static unsigned int other_count; /* named (non-numbered) counters */
+static interrupt_t *interrupt_other;
+
+static __pmnsTree *interrupt_tree; /* dynamic kernel.percpu.interrupts PMNS */
+unsigned int irq_err_count; /* exported - aggregate error counter */
+
+/* Add a PMNS node for one numbered interrupt line ("...interrupts.lineN"). */
+static void
+update_lines_pmns(int domain, unsigned int item, unsigned int id)
+{
+ char entry[128];
+ pmID pmid = pmid_build(domain, CLUSTER_INTERRUPT_LINES, item);
+
+ snprintf(entry, sizeof(entry), "kernel.percpu.interrupts.line%d", id);
+ __pmAddPMNSNode(interrupt_tree, pmid, entry);
+}
+
+/* Add a PMNS node for one named (non-numbered) interrupt counter. */
+static void
+update_other_pmns(int domain, unsigned int item, const char *name)
+{
+ char entry[128];
+ pmID pmid = pmid_build(domain, CLUSTER_INTERRUPT_OTHER, item);
+
+ snprintf(entry, sizeof(entry), "kernel.percpu.interrupts.%s", name);
+ __pmAddPMNSNode(interrupt_tree, pmid, entry);
+}
+
+/*
+ * Parse the /proc/interrupts header row, recording in online_cpumap[]
+ * the CPU ID found in each column (CPUs may be offline, so columns do
+ * not necessarily map 1:1 onto CPU IDs).  Returns the column count.
+ * Bounds the writes at cpu_count - the previous code could overrun
+ * online_cpumap if the header had more columns than configured CPUs.
+ */
+static int
+map_online_cpus(char *buffer)
+{
+    unsigned long i = 0, cpuid;
+    char *s, *end;
+
+    for (s = buffer; *s != '\0' && i < cpu_count; s++) {
+        if (!isdigit((int)*s))
+            continue;
+        cpuid = strtoul(s, &end, 10);
+        if (end == s)
+            break;
+        online_cpumap[i++] = cpuid;    /* column i reports CPU "cpuid" */
+        s = end;
+    }
+    return i;
+}
+
+/*
+ * Map a /proc/interrupts column index to the CPU ID in that column.
+ * online_cpumap[] is filled in column order by map_online_cpus(), so
+ * this is a direct lookup.  The previous linear search inverted the
+ * mapping (it looked for i with online_cpumap[i] == column, i.e.
+ * cpuid -> column), returning wrong CPU IDs whenever CPUs were
+ * offline or non-contiguous.
+ */
+static int
+column_to_cpuid(int column)
+{
+    if (column < 0 || column >= cpu_count)
+        return 0;
+    return online_cpumap[column];
+}
+
+/*
+ * Extract ncolumns per-CPU counter values from a row, storing each at
+ * the CPU ID owning that column.  Counters are 64-bit, so they must
+ * be parsed with strtoull - the previous strtoul silently truncated
+ * values above 2^32 on 32-bit platforms.  Returns a pointer just past
+ * the final value (start of the descriptive text), or NULL if the
+ * row does not have the expected space-delimited format.
+ */
+static char *
+extract_values(char *buffer, unsigned long long *values, int ncolumns)
+{
+    unsigned long i, cpuid;
+    unsigned long long value;
+    char *s = buffer, *end = NULL;
+
+    for (i = 0; i < ncolumns; i++) {
+        value = strtoull(s, &end, 10);
+        if (*end != ' ')
+            return NULL;
+        s = end;
+        cpuid = column_to_cpuid(i);
+        values[cpuid] = value;
+    }
+    return end;
+}
+
+/* Create oneline help text - remove duplicates and end-of-line marker.
+ * Operates in place on buf: trims leading whitespace, terminates the
+ * string at the first newline, then collapses whitespace runs to one
+ * character and drops trailing whitespace.  Returns a pointer into buf.
+ */
+static char *
+oneline_reformat(char *buf)
+{
+ char *result, *start, *end;
+
+ /* position end marker, and skip over whitespace at the start */
+ for (start = end = buf; *end != '\n' && *end != '\0'; end++)
+ if (isspace((int)*start) && isspace((int)*end))
+ start = end+1;
+ *end = '\0';
+
+ /* squash duplicate whitespace and remove trailing whitespace */
+ for (result = start; *result != '\0'; result++) {
+ /* NOTE(review): "end" is not moved back as the string shrinks, so
+ * later memmoves copy a few stale bytes past the new terminator;
+ * appears harmless (still within buf) but worth confirming.
+ */
+ if (isspace((int)result[0]) && (isspace((int)result[1]) || result[1] == '\0')) {
+ memmove(&result[0], &result[1], end - &result[0]);
+ result--;
+ }
+ }
+ return start;
+}
+
+/* Record identity, name and optional help text for a new counter.
+ * "end" is the text following the values (NULL when absent).
+ * NOTE(review): strdup() failures are not checked - name/text may be
+ * NULL after memory exhaustion; readers must tolerate that.
+ */
+static void
+initialise_interrupt(interrupt_t *ip, unsigned int id, char *s, char *end)
+{
+ ip->id = id;
+ ip->name = strdup(s);
+ if (end)
+ ip->text = strdup(oneline_reformat(end));
+}
+
+/* Grow a counter array (*interp, *countp) by one entry, allocating the
+ * per-CPU value storage for the new slot.  Returns 1 on success, 0 on
+ * allocation failure (in which case the array is left unchanged).
+ */
+static int
+extend_interrupts(interrupt_t **interp, unsigned int *countp)
+{
+ int cnt = cpu_count * sizeof(unsigned long long);
+ unsigned long long *values = malloc(cnt);
+ interrupt_t *interrupt = *interp;
+ int count = *countp + 1;
+
+ if (!values)
+ return 0;
+
+ interrupt = realloc(interrupt, count * sizeof(interrupt_t));
+ if (!interrupt) {
+ free(values);
+ return 0;
+ }
+ interrupt[count-1].values = values;
+ *interp = interrupt;
+ *countp = count;
+ return 1;
+}
+
+/* Isolate the counter name at the start of a row, NUL-terminating it in
+ * place; *suffix is set to the character after the name (where values
+ * begin).  Note the name is cut at the first non-alphanumeric character,
+ * so only [A-Za-z0-9]* prefixes (e.g. "NMI", "LOC", "42") are captured.
+ */
+static char *
+extract_interrupt_name(char *buffer, char **suffix)
+{
+ char *s = buffer, *end;
+
+ while (isspace((int)*s)) /* find start of name */
+ s++;
+ for (end = s; *end && isalnum((int)*end); end++) { }
+ *end = '\0'; /* mark end of name */
+ *suffix = end + 1; /* mark values start */
+ return s;
+}
+
+/* Parse one numbered ("line") interrupt row into interrupt_lines[nlines],
+ * growing the array on first sight of this row.  Returns 0 when the row
+ * does not start with a number (signals end of the numbered section) or
+ * the array cannot be extended; 1 otherwise.
+ */
+static int
+extract_interrupt_lines(char *buffer, int ncolumns, int nlines)
+{
+ unsigned long id;
+ char *name, *end, *values;
+ int resize = (nlines >= lines_count);
+
+ name = extract_interrupt_name(buffer, &values);
+ id = strtoul(name, &end, 10);
+ if (*end != '\0')
+ return 0;
+ if (resize && !extend_interrupts(&interrupt_lines, &lines_count))
+ return 0;
+ end = extract_values(values, interrupt_lines[nlines].values, ncolumns);
+ if (resize)
+ initialise_interrupt(&interrupt_lines[nlines], id, name, end);
+ return 1;
+}
+
+/*
+ * Recognise the aggregate interrupt-error row, whose label spelling
+ * varies by architecture; on a match the count is stored in the
+ * exported irq_err_count.  Returns 1 if the row matched, else 0.
+ */
+static int
+extract_interrupt_errors(char *buffer)
+{
+    static const char *patterns[] = { " ERR: %u", "Err: %u", "BAD: %u" };
+    int i;
+
+    for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++)
+        if (sscanf(buffer, patterns[i], &irq_err_count) == 1)
+            return 1;
+    return 0;
+}
+
+/* Recognise (and discard) the interrupt-miss row; matched only so the
+ * caller can skip it, since the value is not exported as a metric.
+ */
+static int
+extract_interrupt_misses(char *buffer)
+{
+ unsigned int irq_mis_count; /* not exported */
+ return sscanf(buffer, " MIS: %u", &irq_mis_count) == 1;
+}
+
+/* Parse one named (non-numbered) counter row into interrupt_other[nlines],
+ * growing the array on first sight of the row; the item id is simply the
+ * row ordinal.  Returns 0 only when the array cannot be extended.
+ */
+static int
+extract_interrupt_other(char *buffer, int ncolumns, int nlines)
+{
+ char *name, *end, *values;
+ int resize = (nlines >= other_count);
+
+ name = extract_interrupt_name(buffer, &values);
+ if (resize && !extend_interrupts(&interrupt_other, &other_count))
+ return 0;
+ end = extract_values(values, interrupt_other[nlines].values, ncolumns);
+ if (resize)
+ initialise_interrupt(&interrupt_other[nlines], nlines, name, end);
+ return 1;
+}
+
+/*
+ * Re-read /proc/interrupts, refreshing the per-line and named per-CPU
+ * counter sets.  Returns 0 on success, else a negative error code.
+ */
+int
+refresh_interrupt_values(void)
+{
+    FILE *fp;
+    char buf[4096];
+    int i, ncolumns, more = 0;
+
+    if (cpu_count == 0) {
+        long ncpus = sysconf(_SC_NPROCESSORS_CONF);
+        if (ncpus <= 0)    /* sysconf can fail; avoid malloc(-1 * ...) */
+            return -EINVAL;
+        online_cpumap = malloc(ncpus * sizeof(int));
+        if (!online_cpumap)
+            return -oserror();
+        cpu_count = ncpus;
+    }
+    memset(online_cpumap, 0, cpu_count * sizeof(int));
+
+    if ((fp = linux_statsfile("/proc/interrupts", buf, sizeof(buf))) == NULL)
+        return -oserror();
+
+    /* first parse header, which maps online CPU number to column number */
+    if (fgets(buf, sizeof(buf), fp)) {
+        ncolumns = map_online_cpus(buf);
+    } else {
+        fclose(fp);
+        return -EINVAL;    /* unrecognised file format */
+    }
+
+    /* next we parse each interrupt line row (starting with a digit) */
+    i = 0;
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+        if (!extract_interrupt_lines(buf, ncolumns, i++)) {
+            more = 1;    /* buf now holds the first non-"line" row */
+            break;
+        }
+    }
+
+    /*
+     * Parse the remaining per-CPU counter rows (starting non-digit).
+     * The loop above has already consumed the first such row into buf,
+     * so process the current buffer before reading further lines - a
+     * plain while(fgets...) loop here silently dropped that first row
+     * (typically NMI).
+     */
+    i = 0;
+    while (more) {
+        if (!extract_interrupt_errors(buf) &&
+            !extract_interrupt_misses(buf) &&
+            !extract_interrupt_other(buf, ncolumns, i++))
+            break;
+        more = (fgets(buf, sizeof(buf), fp) != NULL);
+    }
+
+    fclose(fp);
+    return 0;
+}
+
+/* pmdaDynamicPMNS callback: (re)build the kernel.percpu.interrupts
+ * namespace subtree.  Returns 1 with *tree set when names were updated,
+ * 0 otherwise (with *tree possibly NULL on failure).
+ */
+static int
+refresh_interrupts(pmdaExt *pmda, __pmnsTree **tree)
+{
+ int i, sts, dom = pmda->e_domain;
+
+ if (interrupt_tree) {
+ *tree = interrupt_tree;
+ } else if ((sts = __pmNewPMNS(&interrupt_tree)) < 0) {
+ __pmNotifyErr(LOG_ERR, "%s: failed to create interrupt names: %s\n",
+ pmProgname, pmErrStr(sts));
+ *tree = NULL;
+ } else if ((sts = refresh_interrupt_values()) < 0) {
+ __pmNotifyErr(LOG_ERR, "%s: failed to update interrupt values: %s\n",
+ pmProgname, pmErrStr(sts));
+ *tree = NULL;
+ } else {
+ /* one PMNS entry per numbered line, one per named counter */
+ for (i = 0; i < lines_count; i++)
+ update_lines_pmns(dom, i, interrupt_lines[i].id);
+ for (i = 0; i < other_count; i++)
+ update_other_pmns(dom, i, interrupt_other[i].name);
+ *tree = interrupt_tree;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Fetch one per-CPU interrupt counter: "item" selects the counter and
+ * "inst" the CPU.  Returns 1 with atom->ull set on success, else a
+ * PMAPI error code.  The bounds tests must be ">=" - valid items are
+ * 0..count-1, so the previous ">" permitted item == count and read
+ * one element past the end of the array.
+ */
+int
+interrupts_fetch(int cluster, int item, unsigned int inst, pmAtomValue *atom)
+{
+    if (inst >= cpu_count)
+        return PM_ERR_INST;
+
+    switch (cluster) {
+    case CLUSTER_INTERRUPT_LINES:
+        if (item >= lines_count)
+            return PM_ERR_PMID;
+        atom->ull = interrupt_lines[item].values[inst];
+        return 1;
+    case CLUSTER_INTERRUPT_OTHER:
+        if (item >= other_count)
+            return PM_ERR_PMID;
+        atom->ull = interrupt_other[item].values[inst];
+        return 1;
+    }
+    return PM_ERR_PMID;
+}
+
+/*
+ * Create a new metric table entry based on an existing one, changing
+ * only the item number; used when cloning template entries for the
+ * dynamically-sized interrupt metrics.
+ */
+static void
+refresh_metrictable(pmdaMetric *source, pmdaMetric *dest, int id)
+{
+ int domain = pmid_domain(source->m_desc.pmid);
+ int cluster = pmid_cluster(source->m_desc.pmid);
+
+ memcpy(dest, source, sizeof(pmdaMetric));
+ dest->m_desc.pmid = pmid_build(domain, cluster, id);
+
+ if (pmDebug & DBG_TRACE_LIBPMDA)
+ fprintf(stderr, "interrupts refresh_metrictable: (%p -> %p) "
+ "metric ID dup: %d.%d.%d -> %d.%d.%d\n",
+ source, dest, domain, cluster,
+ pmid_item(source->m_desc.pmid), domain, cluster, id);
+}
+
+/*
+ * Needs to answer the question: how much extra space needs to be
+ * allocated in the metric table for (dynamic) interrupt metrics"?
+ * Return value is the number of additional entries/trees needed.
+ */
+static void
+size_metrictable(int *total, int *trees)
+{
+ *total = 2; /* lines and other */
+ /* per-tree count: large enough for whichever counter set is bigger */
+ *trees = lines_count > other_count ? lines_count : other_count;
+
+ if (pmDebug & DBG_TRACE_LIBPMDA)
+ fprintf(stderr, "interrupts size_metrictable: %d total x %d trees\n",
+ *total, *trees);
+}
+
+/*
+ * Help-text callback for the dynamic interrupt metrics; returns the
+ * one-line text captured from /proc/interrupts, PM_ERR_TEXT when a
+ * counter has none.  The bounds tests must be ">=" - valid items are
+ * 0..count-1, so the previous ">" permitted item == count and read
+ * one element past the end of the array.
+ */
+static int
+interrupts_text(pmdaExt *pmda, pmID pmid, int type, char **buf)
+{
+    int item = pmid_item(pmid);
+    int cluster = pmid_cluster(pmid);
+
+    switch (cluster) {
+    case CLUSTER_INTERRUPT_LINES:
+        if (item >= lines_count)
+            return PM_ERR_PMID;
+        if (interrupt_lines[item].text == NULL)
+            return PM_ERR_TEXT;
+        *buf = interrupt_lines[item].text;
+        return 0;
+    case CLUSTER_INTERRUPT_OTHER:
+        if (item >= other_count)
+            return PM_ERR_PMID;
+        if (interrupt_other[item].text == NULL)
+            return PM_ERR_TEXT;
+        *buf = interrupt_other[item].text;
+        return 0;
+    }
+    return PM_ERR_PMID;
+}
+
+/* Register the two interrupt clusters with the dynamic-PMNS machinery,
+ * wiring up the refresh, help-text and metric-table callbacks above.
+ */
+void
+interrupts_init(pmdaMetric *metrictable, int nmetrics)
+{
+ int set[] = { CLUSTER_INTERRUPT_LINES, CLUSTER_INTERRUPT_OTHER };
+
+ pmdaDynamicPMNS("kernel.percpu.interrupts",
+ set, sizeof(set)/sizeof(int),
+ refresh_interrupts, interrupts_text,
+ refresh_metrictable, size_metrictable,
+ metrictable, nmetrics);
+}
diff --git a/src/pmdas/linux/interrupts.h b/src/pmdas/linux/interrupts.h
new file mode 100644
index 0000000..b8a0336
--- /dev/null
+++ b/src/pmdas/linux/interrupts.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2011 Aconex. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+extern unsigned int irq_err_count;
+
+extern void interrupts_init(pmdaMetric *, int);
+extern int refresh_interrupt_values(void);
+extern int interrupts_fetch(int, int, unsigned int, pmAtomValue *);
diff --git a/src/pmdas/linux/linux_table.c b/src/pmdas/linux/linux_table.c
new file mode 100644
index 0000000..d04f454
--- /dev/null
+++ b/src/pmdas/linux/linux_table.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2012 Red Hat.
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdint.h>
+
+#include "linux_table.h"
+
+extern int linux_table_lookup(const char *field, struct linux_table *table, uint64_t *val);
+extern struct linux_table *linux_table_clone(struct linux_table *table);
+extern int linux_table_scan(FILE *fp, struct linux_table *table);
+
+/* Find "field" in table and return its current value via *val.
+ * Returns 1 on success, 0 when the field is unknown or has no valid
+ * value yet.  Matching is strncmp() over field_len, i.e. a prefix
+ * match -- tables must be set up via linux_table_clone() so field_len
+ * is non-zero, otherwise strncmp(..., 0) matches the first entry.
+ */
+inline int
+linux_table_lookup(const char *field, struct linux_table *table, uint64_t *val)
+{
+ struct linux_table *t;
+
+ for (t=table; t && t->field; t++) {
+ if (strncmp(field, t->field, t->field_len) == 0) {
+ if (t->valid) {
+ *val = t->val;
+ return 1;
+ }
+ /* Invalid */
+ return 0;
+ }
+ }
+
+ fprintf(stderr, "Warning: linux_table_lookup failed for \"%s\"\n", field);
+ return 0;
+}
+
+/*
+ * Allocate a working copy of a NULL-terminated table template,
+ * caching each entry's field length and marking every value invalid
+ * until the first scan fills it in.  Returns NULL on allocation
+ * failure or when handed a NULL template.
+ */
+inline struct linux_table *
+linux_table_clone(struct linux_table *table)
+{
+    struct linux_table *copy, *entry;
+    int nentries = 1;    /* include the NULL-field terminator */
+
+    if (!table)
+        return NULL;
+
+    for (entry = table; entry->field; entry++)
+        nentries++;
+
+    copy = (struct linux_table *)malloc(nentries * sizeof(struct linux_table));
+    if (!copy)
+        return NULL;
+    memcpy(copy, table, nentries * sizeof(struct linux_table));
+
+    /* prime the copy for scanning */
+    for (entry = copy; entry && entry->field; entry++) {
+        if (!entry->field_len)
+            entry->field_len = strlen(entry->field);
+        entry->valid = LINUX_TABLE_INVALID;
+    }
+
+    return copy;
+}
+
+/*
+ * Scan a /proc-style "name value" file from fp, updating each table
+ * entry whose field name appears.  Returns the number of fields
+ * matched.  Counter fields (maxval != 0) accumulate into val with
+ * single-wrap detection; counters can be full 64-bit (tables declare
+ * maxval ULONGLONG_MAX) so they must be parsed with strtoull - the
+ * previous strtoul silently truncated on 32-bit platforms.
+ */
+inline int
+linux_table_scan(FILE *fp, struct linux_table *table)
+{
+    char *p;
+    struct linux_table *t;
+    char buf[1024];
+    int ret = 0;
+
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+        for (t = table; t && t->field; t++) {
+            if ((p = strstr(buf, t->field)) != NULL) {
+                /* advance to the first digit after the matched field */
+                for (p += t->field_len; *p; p++) {
+                    if (isdigit((int)*p))
+                        break;
+                }
+                if (isdigit((int)*p)) {
+                    t->this = strtoull(p, NULL, 10);
+                    t->valid = LINUX_TABLE_VALID;
+                    ret++;
+                    break;
+                }
+            }
+        }
+    }
+
+    /* calculate current value, accounting for counter wrap */
+    for (t = table; t && t->field; t++) {
+        if (t->maxval == 0)
+            /* instantaneous value */
+            t->val = t->this;
+        else {
+            /* counter value */
+            if (t->this >= t->prev)
+                t->val += t->this - t->prev;
+            else
+                t->val += t->this + (t->maxval - t->prev);
+            t->prev = t->this;
+        }
+    }
+
+    return ret;
+}
diff --git a/src/pmdas/linux/linux_table.h b/src/pmdas/linux/linux_table.h
new file mode 100644
index 0000000..3900690
--- /dev/null
+++ b/src/pmdas/linux/linux_table.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _LINUX_TABLE_H
+#define _LINUX_TABLE_H
+/*
+ * scans linux style /proc tables, e.g. :
+ *
+ * numa_hit 266809
+ * numa_miss 0
+ * numa_foreign 0
+ * interleave_hit 0
+ * local_node 265680
+ * other_node 1129
+ *
+ * Value is a counter that wraps at maxval,
+ * unless maxval is 0, in which case the
+ * value is treated as instantaneous and no
+ * wrap detection is attempted.
+ *
+ * Tables are typically declared as a static array, and
+ * then allocated dynamically with linux_table_clone().
+ * e.g. :
+ *
+ * static struct linux_table numa_meminfo_table[] = {
+ * { "numa_hit", 0xffffffffffffffff },
+ * { "numa_miss", 0xffffffffffffffff },
+ * { "numa_foreign", 0xffffffffffffffff },
+ * { "interleave_hit", 0xffffffffffffffff },
+ * { "local_node", 0xffffffffffffffff },
+ * { "other_node", 0xffffffffffffffff },
+ * { NULL }
+ * };
+ */
+
+/* validity states for a table entry's value */
+enum {
+ LINUX_TABLE_INVALID,
+ LINUX_TABLE_VALID
+};
+
+struct linux_table {
+ char *field; /* field name to match, NULL terminates the table */
+ uint64_t maxval; /* counter wrap point; 0 => instantaneous value */
+ uint64_t val; /* exported (possibly accumulated) value */
+ uint64_t this; /* most recently scanned raw value (N.B. "this" is fine in C, not C++) */
+ uint64_t prev; /* previous raw value, for wrap detection */
+ int field_len; /* cached strlen(field), set by linux_table_clone */
+ int valid; /* LINUX_TABLE_{IN,}VALID */
+};
+
+extern int linux_table_lookup(const char *field, struct linux_table *table, uint64_t *val);
+extern struct linux_table *linux_table_clone(struct linux_table *table);
+extern int linux_table_scan(FILE *fp, struct linux_table *table);
+
+#endif /* _LINUX_TABLE_H */
diff --git a/src/pmdas/linux/msg_limits.c b/src/pmdas/linux/msg_limits.c
new file mode 100644
index 0000000..afac20e
--- /dev/null
+++ b/src/pmdas/linux/msg_limits.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ * This code contributed by Mike Mason <mmlnx@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define __USE_GNU 1 /* required for IPC_INFO define */
+#include <sys/ipc.h>
+#include <sys/msg.h>
+
+#include "pmapi.h"
+#include "msg_limits.h"
+
+/* Refresh System V message queue limits via msgctl(IPC_INFO).
+ * Returns 0 on success, else negative errno.
+ */
+int
+refresh_msg_limits(msg_limits_t *msg_limits)
+{
+ static struct msginfo msginfo;
+ static int started;
+
+ /* zero the export structure once, before the first query */
+ if (!started) {
+ started = 1;
+ memset(msg_limits, 0, sizeof(msg_limits_t));
+ }
+
+ /* IPC_INFO takes a struct msginfo despite the msqid_ds prototype */
+ if (msgctl(0, IPC_INFO, (struct msqid_ds *) &msginfo) < 0) {
+ return -oserror();
+ }
+
+ msg_limits->msgpool = msginfo.msgpool;
+ msg_limits->msgmap = msginfo.msgmap;
+ msg_limits->msgmax = msginfo.msgmax;
+ msg_limits->msgmnb = msginfo.msgmnb;
+ msg_limits->msgmni = msginfo.msgmni;
+ msg_limits->msgssz = msginfo.msgssz;
+ msg_limits->msgtql = msginfo.msgtql;
+ msg_limits->msgseg = msginfo.msgseg;
+
+ /* success */
+ return 0;
+}
diff --git a/src/pmdas/linux/msg_limits.h b/src/pmdas/linux/msg_limits.h
new file mode 100644
index 0000000..06fb7c5
--- /dev/null
+++ b/src/pmdas/linux/msg_limits.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * This code contributed by Mike Mason (mmlnx@us.ibm.com)
+ */
+
+typedef struct {
+ unsigned int msgpool; /* size of message pool (kbytes) */
+ unsigned int msgmap; /* # of entries in message map */
+ unsigned int msgmax; /* maximum size of a message */
+ unsigned int msgmnb; /* default maximum size of message queue */
+ unsigned int msgmni; /* maximum # of message queue identifiers */
+ unsigned int msgssz; /* message segment size */
+ unsigned int msgtql; /* # of system message headers */
+ unsigned int msgseg; /* maximum # of message segments */
+} msg_limits_t;
+
+extern int refresh_msg_limits(msg_limits_t*);
+
diff --git a/src/pmdas/linux/numa_meminfo.c b/src/pmdas/linux/numa_meminfo.c
new file mode 100644
index 0000000..22d8351
--- /dev/null
+++ b/src/pmdas/linux/numa_meminfo.c
@@ -0,0 +1,137 @@
+/*
+ * Linux NUMA meminfo metrics cluster from sysfs
+ *
+ * Copyright (c) 2012 Red Hat.
+ * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <dirent.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "linux_table.h"
+#include "proc_cpuinfo.h"
+#include "proc_stat.h"
+#include "numa_meminfo.h"
+
+/* sysfs file for numa meminfo */
+/* sysfs file for numa meminfo; all fields are instantaneous (maxval 0).
+ * Uses standard C99 ".field =" designated initializers - the previous
+ * "field:" form is an obsolete GCC-only extension.
+ */
+static struct linux_table numa_meminfo_table[] = {
+    { .field = "MemTotal:", .maxval = 0x0 },
+    { .field = "MemFree:", .maxval = 0x0 },
+    { .field = "MemUsed:", .maxval = 0x0 },
+    { .field = "Active:", .maxval = 0x0 },
+    { .field = "Inactive:", .maxval = 0x0 },
+    { .field = "Active(anon):", .maxval = 0x0 },
+    { .field = "Inactive(anon):", .maxval = 0x0 },
+    { .field = "Active(file):", .maxval = 0x0 },
+    { .field = "Inactive(file):", .maxval = 0x0 },
+    { .field = "HighTotal:", .maxval = 0x0 },
+    { .field = "HighFree:", .maxval = 0x0 },
+    { .field = "LowTotal:", .maxval = 0x0 },
+    { .field = "LowFree:", .maxval = 0x0 },
+    { .field = "Unevictable:", .maxval = 0x0 },
+    { .field = "Mlocked:", .maxval = 0x0 },
+    { .field = "Dirty:", .maxval = 0x0 },
+    { .field = "Writeback:", .maxval = 0x0 },
+    { .field = "FilePages:", .maxval = 0x0 },
+    { .field = "Mapped:", .maxval = 0x0 },
+    { .field = "AnonPages:", .maxval = 0x0 },
+    { .field = "Shmem:", .maxval = 0x0 },
+    { .field = "KernelStack:", .maxval = 0x0 },
+    { .field = "PageTables:", .maxval = 0x0 },
+    { .field = "NFS_Unstable:", .maxval = 0x0 },
+    { .field = "Bounce:", .maxval = 0x0 },
+    { .field = "WritebackTmp:", .maxval = 0x0 },
+    { .field = "Slab:", .maxval = 0x0 },
+    { .field = "SReclaimable:", .maxval = 0x0 },
+    { .field = "SUnreclaim:", .maxval = 0x0 },
+    { .field = "HugePages_Total:", .maxval = 0x0 },
+    { .field = "HugePages_Free:", .maxval = 0x0 },
+    { .field = "HugePages_Surp:", .maxval = 0x0 },
+    { .field = NULL }
+};
+
+/* sysfs file for numastat */
+/* sysfs file for numastat; all fields are wrapping 64-bit counters.
+ * Uses standard C99 ".field =" designated initializers - the previous
+ * "field:" form is an obsolete GCC-only extension.
+ */
+static struct linux_table numa_memstat_table[] = {
+    { .field = "numa_hit", .maxval = ULONGLONG_MAX },
+    { .field = "numa_miss", .maxval = ULONGLONG_MAX },
+    { .field = "numa_foreign", .maxval = ULONGLONG_MAX },
+    { .field = "interleave_hit", .maxval = ULONGLONG_MAX },
+    { .field = "local_node", .maxval = ULONGLONG_MAX },
+    { .field = "other_node", .maxval = ULONGLONG_MAX },
+    { .field = NULL }
+};
+
+/* Refresh per-NUMA-node meminfo/numastat tables from sysfs.  On the
+ * first call, allocates and clones a table pair for every instance in
+ * NODE_INDOM (populated via refresh_proc_stat).  Returns 0 on success,
+ * -1 on allocation failure.
+ * NOTE(review): after a partial first-time failure, "started" remains 0
+ * and the next call re-runs linux_table_clone() for every node, leaking
+ * any clones made on the failed attempt - confirm whether retry after
+ * OOM matters here.
+ */
+int refresh_numa_meminfo(numa_meminfo_t *numa_meminfo, proc_cpuinfo_t *proc_cpuinfo, proc_stat_t *proc_stat)
+{
+ int i;
+ FILE *fp;
+ pmdaIndom *idp = PMDAINDOM(NODE_INDOM);
+ static int started;
+
+ /* First time only */
+ if (!started) {
+ refresh_proc_stat(proc_cpuinfo, proc_stat);
+
+ if (!numa_meminfo->node_info) /* may have allocated this, but failed below */
+ numa_meminfo->node_info = (nodeinfo_t *)calloc(idp->it_numinst, sizeof(nodeinfo_t));
+ if (!numa_meminfo->node_info) {
+ fprintf(stderr, "%s: error allocating numa node_info: %s\n",
+ __FUNCTION__, osstrerror());
+ return -1;
+ }
+
+ for (i = 0; i < idp->it_numinst; i++) {
+ numa_meminfo->node_info[i].meminfo = linux_table_clone(numa_meminfo_table);
+ if (!numa_meminfo->node_info[i].meminfo) {
+ fprintf(stderr, "%s: error allocating meminfo: %s\n",
+ __FUNCTION__, osstrerror());
+ return -1;
+ }
+ numa_meminfo->node_info[i].memstat = linux_table_clone(numa_memstat_table);
+ if (!numa_meminfo->node_info[i].memstat) {
+ fprintf(stderr, "%s: error allocating memstat: %s\n",
+ __FUNCTION__, osstrerror());
+ return -1;
+ }
+ }
+
+ numa_meminfo->node_indom = idp;
+ started = 1;
+ }
+
+ /* Refresh - missing nodes/files are silently skipped (stale values kept) */
+ for (i = 0; i < idp->it_numinst; i++) {
+ char buf[MAXPATHLEN];
+
+ snprintf(buf, sizeof(buf), "%s/sys/devices/system/node/node%d/meminfo",
+ linux_statspath, i);
+ if ((fp = fopen(buf, "r")) != NULL) {
+ linux_table_scan(fp, numa_meminfo->node_info[i].meminfo);
+ fclose(fp);
+ }
+
+ snprintf(buf, sizeof(buf), "%s/sys/devices/system/node/node%d/numastat",
+ linux_statspath, i);
+ if ((fp = fopen(buf, "r")) != NULL) {
+ linux_table_scan(fp, numa_meminfo->node_info[i].memstat);
+ fclose(fp);
+ }
+ }
+
+ return 0;
+}
diff --git a/src/pmdas/linux/numa_meminfo.h b/src/pmdas/linux/numa_meminfo.h
new file mode 100644
index 0000000..22c1289
--- /dev/null
+++ b/src/pmdas/linux/numa_meminfo.h
@@ -0,0 +1,32 @@
+/*
+ * Linux NUMA meminfo metrics cluster from sysfs
+ *
+ * Copyright (c) 2012 Red Hat.
+ * Copyright (c) 2009 Silicon Graphics Inc., All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/*
+ * Information from /sys/devices/node/node[0-9]+/meminfo and numastat
+ */
+typedef struct {
+ struct linux_table *meminfo;
+ struct linux_table *memstat;
+} nodeinfo_t;
+
+typedef struct {
+ nodeinfo_t *node_info;
+ pmdaIndom *node_indom;
+} numa_meminfo_t;
+
+extern int refresh_numa_meminfo(numa_meminfo_t *, proc_cpuinfo_t *, proc_stat_t *);
+
diff --git a/src/pmdas/linux/pmda.c b/src/pmdas/linux/pmda.c
new file mode 100644
index 0000000..73d961a
--- /dev/null
+++ b/src/pmdas/linux/pmda.c
@@ -0,0 +1,5807 @@
+/*
+ * Linux PMDA
+ *
+ * Copyright (c) 2012-2014 Red Hat.
+ * Copyright (c) 2007-2011 Aconex. All Rights Reserved.
+ * Copyright (c) 2002 International Business Machines Corp.
+ * Copyright (c) 2000,2004,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#undef LINUX /* defined in NSS/NSPR headers as something different, which we do not need. */
+#include "domain.h"
+
+#include <ctype.h>
+#include <sys/vfs.h>
+#include <sys/stat.h>
+#include <sys/times.h>
+#include <sys/utsname.h>
+#include <utmp.h>
+#include <pwd.h>
+#include <grp.h>
+
+#include "convert.h"
+#include "clusters.h"
+#include "indom.h"
+
+#include "proc_cpuinfo.h"
+#include "proc_stat.h"
+#include "proc_meminfo.h"
+#include "proc_loadavg.h"
+#include "proc_net_dev.h"
+#include "filesys.h"
+#include "swapdev.h"
+#include "getinfo.h"
+#include "proc_net_rpc.h"
+#include "proc_net_sockstat.h"
+#include "proc_net_tcp.h"
+#include "proc_partitions.h"
+#include "proc_net_netstat.h"
+#include "proc_net_snmp.h"
+#include "proc_scsi.h"
+#include "proc_slabinfo.h"
+#include "proc_uptime.h"
+#include "sem_limits.h"
+#include "msg_limits.h"
+#include "shm_limits.h"
+#include "proc_sys_fs.h"
+#include "proc_vmstat.h"
+#include "sysfs_kernel.h"
+#include "linux_table.h"
+#include "numa_meminfo.h"
+#include "interrupts.h"
+#include "devmapper.h"
+
+static proc_stat_t proc_stat;
+static proc_meminfo_t proc_meminfo;
+static proc_loadavg_t proc_loadavg;
+static proc_net_rpc_t proc_net_rpc;
+static proc_net_tcp_t proc_net_tcp;
+static proc_net_sockstat_t proc_net_sockstat;
+static struct utsname kernel_uname;
+static char uname_string[sizeof(kernel_uname)];
+static proc_scsi_t proc_scsi;
+static dev_mapper_t dev_mapper;
+static proc_cpuinfo_t proc_cpuinfo;
+static proc_slabinfo_t proc_slabinfo;
+static sem_limits_t sem_limits;
+static msg_limits_t msg_limits;
+static shm_limits_t shm_limits;
+static proc_uptime_t proc_uptime;
+static proc_sys_fs_t proc_sys_fs;
+static sysfs_kernel_t sysfs_kernel;
+static numa_meminfo_t numa_meminfo;
+
+static int _isDSO = 1; /* =0 I am a daemon */
+static char *username;
+
+/* globals */
+size_t _pm_system_pagesize; /* for hinv.pagesize and used elsewhere */
+int _pm_have_proc_vmstat; /* if /proc/vmstat is available */
+int _pm_intr_size; /* size in bytes of interrupt sum count metric */
+int _pm_ctxt_size; /* size in bytes of context switch count metric */
+int _pm_cputime_size; /* size in bytes of most of the cputime metrics */
+int _pm_idletime_size; /* size in bytes of the idle cputime metric */
+proc_vmstat_t _pm_proc_vmstat;
+proc_net_snmp_t _pm_proc_net_snmp;
+pmdaInstid _pm_proc_net_snmp_indom_id[NR_ICMPMSG_COUNTERS];
+proc_net_netstat_t _pm_proc_net_netstat;
+
+/*
+ * Metric Instance Domains (statically initialized ones only)
+ */
+static pmdaInstid loadavg_indom_id[] = {
+ { 1, "1 minute" }, { 5, "5 minute" }, { 15, "15 minute" }
+};
+
+static pmdaInstid nfs_indom_id[] = {
+ { 0, "null" },
+ { 1, "getattr" },
+ { 2, "setattr" },
+ { 3, "root" },
+ { 4, "lookup" },
+ { 5, "readlink" },
+ { 6, "read" },
+ { 7, "wrcache" },
+ { 8, "write" },
+ { 9, "create" },
+ { 10, "remove" },
+ { 11, "rename" },
+ { 12, "link" },
+ { 13, "symlink" },
+ { 14, "mkdir" },
+ { 15, "rmdir" },
+ { 16, "readdir" },
+ { 17, "statfs" }
+};
+
+static pmdaInstid nfs3_indom_id[] = {
+ { 0, "null" },
+ { 1, "getattr" },
+ { 2, "setattr" },
+ { 3, "lookup" },
+ { 4, "access" },
+ { 5, "readlink" },
+ { 6, "read" },
+ { 7, "write" },
+ { 8, "create" },
+ { 9, "mkdir" },
+ { 10, "symlink" },
+ { 11, "mknod" },
+ { 12, "remove" },
+ { 13, "rmdir" },
+ { 14, "rename" },
+ { 15, "link" },
+ { 16, "readdir" },
+ { 17, "readdir+" },
+ { 18, "statfs" },
+ { 19, "fsinfo" },
+ { 20, "pathconf" },
+ { 21, "commit" }
+};
+
+static pmdaInstid nfs4_cli_indom_id[] = {
+ { 0, "null" },
+ { 1, "read" },
+ { 2, "write" },
+ { 3, "commit" },
+ { 4, "open" },
+ { 5, "open_conf" },
+ { 6, "open_noat" },
+ { 7, "open_dgrd" },
+ { 8, "close" },
+ { 9, "setattr" },
+ { 10, "fsinfo" },
+ { 11, "renew" },
+ { 12, "setclntid" },
+ { 13, "confirm" },
+ { 14, "lock" },
+ { 15, "lockt" },
+ { 16, "locku" },
+ { 17, "access" },
+ { 18, "getattr" },
+ { 19, "lookup" },
+ { 20, "lookup_root" },
+ { 21, "remove" },
+ { 22, "rename" },
+ { 23, "link" },
+ { 24, "symlink" },
+ { 25, "create" },
+ { 26, "pathconf" },
+ { 27, "statfs" },
+ { 28, "readlink" },
+ { 29, "readdir" },
+ { 30, "server_caps" },
+ { 31, "delegreturn" },
+ { 32, "getacl" },
+ { 33, "setacl" },
+ { 34, "fs_locatns" },
+};
+
+static pmdaInstid nfs4_svr_indom_id[] = {
+ { 0, "null" },
+ { 1, "op0-unused" },
+ { 2, "op1-unused"},
+ { 3, "minorversion"}, /* future use */
+ { 4, "access" },
+ { 5, "close" },
+ { 6, "commit" },
+ { 7, "create" },
+ { 8, "delegpurge" },
+ { 9, "delegreturn" },
+ { 10, "getattr" },
+ { 11, "getfh" },
+ { 12, "link" },
+ { 13, "lock" },
+ { 14, "lockt" },
+ { 15, "locku" },
+ { 16, "lookup" },
+ { 17, "lookup_root" },
+ { 18, "nverify" },
+ { 19, "open" },
+ { 20, "openattr" },
+ { 21, "open_conf" },
+ { 22, "open_dgrd" },
+ { 23, "putfh" },
+ { 24, "putpubfh" },
+ { 25, "putrootfh" },
+ { 26, "read" },
+ { 27, "readdir" },
+ { 28, "readlink" },
+ { 29, "remove" },
+ { 30, "rename" },
+ { 31, "renew" },
+ { 32, "restorefh" },
+ { 33, "savefh" },
+ { 34, "secinfo" },
+ { 35, "setattr" },
+ { 36, "setcltid" },
+ { 37, "setcltidconf" },
+ { 38, "verify" },
+ { 39, "write" },
+ { 40, "rellockowner" },
+};
+
+static pmdaIndom indomtab[] = {
+ { CPU_INDOM, 0, NULL },
+ { DISK_INDOM, 0, NULL }, /* cached */
+ { LOADAVG_INDOM, 3, loadavg_indom_id },
+ { NET_DEV_INDOM, 0, NULL },
+ { PROC_INTERRUPTS_INDOM, 0, NULL }, /* deprecated */
+ { FILESYS_INDOM, 0, NULL },
+ { SWAPDEV_INDOM, 0, NULL },
+ { NFS_INDOM, NR_RPC_COUNTERS, nfs_indom_id },
+ { NFS3_INDOM, NR_RPC3_COUNTERS, nfs3_indom_id },
+ { PROC_PROC_INDOM, 0, NULL }, /* migrated to the proc PMDA */
+ { PARTITIONS_INDOM, 0, NULL }, /* cached */
+ { SCSI_INDOM, 0, NULL },
+ { SLAB_INDOM, 0, NULL },
+ { STRINGS_INDOM, 0, NULL },
+ { NFS4_CLI_INDOM, NR_RPC4_CLI_COUNTERS, nfs4_cli_indom_id },
+ { NFS4_SVR_INDOM, NR_RPC4_SVR_COUNTERS, nfs4_svr_indom_id },
+ { QUOTA_PRJ_INDOM, 0, NULL }, /* migrated to the xfs PMDA */
+ { NET_ADDR_INDOM, 0, NULL },
+ { TMPFS_INDOM, 0, NULL },
+ { NODE_INDOM, 0, NULL },
+ { PROC_CGROUP_SUBSYS_INDOM, 0, NULL },
+ { PROC_CGROUP_MOUNTS_INDOM, 0, NULL },
+ { LV_INDOM, 0, NULL },
+ { ICMPMSG_INDOM, NR_ICMPMSG_COUNTERS, _pm_proc_net_snmp_indom_id },
+ { DM_INDOM, 0, NULL }, /* cached */
+};
+
+
+/*
+ * all metrics supported in this PMDA - one table entry for each
+ */
+
+static pmdaMetric metrictab[] = {
+
+/*
+ * /proc/stat cluster
+ */
+
+/* kernel.percpu.cpu.user */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,0), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.nice */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,1), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.sys */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,2), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.idle */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,3), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.wait.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,30), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.intr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,31), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.irq.soft */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,56), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.irq.hard */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,57), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.steal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,58), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.guest */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,61), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.percpu.cpu.vuser */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,76), KERNEL_UTYPE, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+
+/* kernel.pernode.cpu.user */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,62), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.nice */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,63), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.sys */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,64), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.idle */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,65), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.wait.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,69), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.intr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,66), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.irq.soft */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,70), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.irq.hard */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,71), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.steal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,67), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.guest */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,68), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.pernode.cpu.vuser */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,77), KERNEL_UTYPE, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.dev.read */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,4), KERNEL_ULONG, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.write */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,5), KERNEL_ULONG, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.blkread */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,6), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.blkwrite */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,7), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.avactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,46), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.dev.aveq */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,47), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.dev.read_merge */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,49), KERNEL_ULONG, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.write_merge */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,50), KERNEL_ULONG, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.scheduler */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,59), PM_TYPE_STRING, DISK_INDOM,
+ PM_SEM_INSTANT, PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* disk.dev.read_rawactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,72), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.dev.write_rawactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,73), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.all.avactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,44), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.all.aveq */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,45), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.all.read_merge */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,51), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.write_merge */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,52), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.read_rawactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,74), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.all.read_rawactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,75), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* swap.pagesin */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,8), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* swap.pagesout */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,9), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* swap.in */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,10), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* swap.out */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,11), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* kernel.all.intr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,12), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* kernel.all.pswitch */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,13), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* kernel.all.sysfork */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,14), KERNEL_ULONG, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* kernel.all.cpu.user */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,20), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.nice */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,21), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.sys */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,22), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.idle */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,23), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.intr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,34), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.wait.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,35), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.irq.soft */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,53), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.irq.hard */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,54), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.steal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,55), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.guest */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,60), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* kernel.all.cpu.vuser */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,78), KERNEL_UTYPE, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+/* disk.all.read */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,24), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.write */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,25), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.blkread */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,26), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.blkwrite */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,27), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,28), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.dev.blktotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,36), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,29), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.all.blktotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,37), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* hinv.ncpu */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,32), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* hinv.ndisk */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,33), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* hinv.nnode */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,19), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* kernel.all.hz */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,48), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,-1,1,0,PM_TIME_SEC,PM_COUNT_ONE) }, },
+
+/*
+ * /proc/uptime cluster
+ * Uptime modified and idletime added by Mike Mason <mmlnx@us.ibm.com>
+ */
+
+/* kernel.all.uptime */
+ { NULL,
+ { PMDA_PMID(CLUSTER_UPTIME,0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_SEC,0) }, },
+
+/* kernel.all.idletime */
+ { NULL,
+ { PMDA_PMID(CLUSTER_UPTIME,1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_SEC,0) }, },
+
+/*
+ * /proc/meminfo cluster
+ */
+
+/* mem.physmem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.used */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.free */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.shared */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.bufmem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.cached */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.active */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.inactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.swapCached */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,13), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.highTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,16), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.highFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,17), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.lowTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,18), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.lowFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,19), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.swapTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,20), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.swapFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,21), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.dirty */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,22), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.writeback */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,23), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.mapped */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,24), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.slab */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,25), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.committed_AS */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,26), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.pageTables */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,27), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.reverseMaps */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,28), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.cache_clean */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,29), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.anonpages */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,30), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.commitLimit */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,31), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.bounce */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,32), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.NFS_Unstable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,33), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.slabReclaimable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,34), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.slabUnreclaimable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,35), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.active_anon */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,36), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.inactive_anon */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,37), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.active_file */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,38), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.inactive_file */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,39), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.unevictable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,40), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.mlocked */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,41), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.shmem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,42), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.kernelStack */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,43), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.hugepagesTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,44), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.util.hugepagesFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,45), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.util.hugepagesRsvd */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,46), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.util.hugepagesSurp */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,47), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.util.directMap4k */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,48), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.directMap2M */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,49), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.vmallocTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,50), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.vmallocUsed */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,51), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.vmallocChunk */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,52), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.mmap_copy */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,53), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.quicklists */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,54), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.corrupthardware */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,55), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.mmap_copy */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,56), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.directMap1G */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,57), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.util.available */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,58), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,0), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.free */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,1), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.used */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,2), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.active */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,3), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.inactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,4), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.active_anon */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,5), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.inactive_anon */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,6), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.active_file */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,7), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.inactive_file */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,8), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.highTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,9), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.highFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,10), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.lowTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,11), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.lowFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,12), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.unevictable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,13), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.mlocked */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,14), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.dirty */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,15), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.writeback */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,16), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.filePages */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,17), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.mapped */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,18), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.anonpages */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,19), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.shmem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,20), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.kernelStack */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,21), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.pageTables */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,22), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.NFS_Unstable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,23), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.bounce */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,24), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.writebackTmp */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,25), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.slab */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,26), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.slabReclaimable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,27), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.slabUnreclaimable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,28), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* mem.numa.util.hugepagesTotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,29), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.util.hugepagesFree */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,30), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.util.hugepagesSurp */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,31), PM_TYPE_U64, NODE_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.alloc.hit */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,32), PM_TYPE_U64, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.alloc.miss */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,33), PM_TYPE_U64, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.alloc.foreign */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,34), PM_TYPE_U64, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.alloc.interleave_hit */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,35), PM_TYPE_U64, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.alloc.local_node */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,36), PM_TYPE_U64, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* mem.numa.alloc.other_node */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUMA_MEMINFO,37), PM_TYPE_U64, NODE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+
+/* swap.length */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* swap.used */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,7), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* swap.free */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* hinv.physmem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,9), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_MBYTE,0,0) }, },
+
+/* mem.freemem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,10), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* hinv.pagesize */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,11), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* mem.util.other */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MEMINFO,12), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/*
+ * /proc/slabinfo cluster
+ */
+
+ /* mem.slabinfo.objects.active */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,0), PM_TYPE_U64, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* mem.slabinfo.objects.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,1), PM_TYPE_U64, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* mem.slabinfo.objects.size */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,2), PM_TYPE_U32, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+ /* mem.slabinfo.slabs.active */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,3), PM_TYPE_U32, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* mem.slabinfo.slabs.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,4), PM_TYPE_U32, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* mem.slabinfo.slabs.pages_per_slab */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,5), PM_TYPE_U32, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* mem.slabinfo.slabs.objects_per_slab */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,6), PM_TYPE_U32, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* mem.slabinfo.slabs.total_size */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SLAB,7), PM_TYPE_U64, SLAB_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/*
+ * /proc/loadavg cluster
+ */
+
+ /* kernel.all.load */
+ { NULL,
+ { PMDA_PMID(CLUSTER_LOADAVG,0), PM_TYPE_FLOAT, LOADAVG_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* kernel.all.lastpid -- added by Mike Mason <mmlnx@us.ibm.com> */
+ { NULL,
+ { PMDA_PMID(CLUSTER_LOADAVG, 1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* kernel.all.runnable */
+ { NULL,
+ { PMDA_PMID(CLUSTER_LOADAVG, 2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+ /* kernel.all.nprocs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_LOADAVG, 3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/*
+ * /proc/net/dev cluster
+ */
+
+/* network.interface.in.bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,0), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* network.interface.in.packets */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,1), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.in.errors */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,2), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.in.drops */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,3), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.in.fifo */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,4), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.in.frame */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,5), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.in.compressed */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,6), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.in.mcasts */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,7), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.out.bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,8), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* network.interface.out.packets */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,9), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.out.errors */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,10), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.out.drops */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,11), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.out.fifo */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,12), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.collisions */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,13), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.out.carrier */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,14), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.out.compressed */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,15), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.total.bytes */
+ { NULL,
+   { PMDA_PMID(CLUSTER_NET_DEV,16), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+   /* byte scale belongs in the scaleSpace (4th) slot of PMDA_PMUNITS;
+    * the old (1,0,0,0,PM_SPACE_BYTE,0) form put it in scaleTime and only
+    * worked because PM_SPACE_BYTE happens to be 0 */
+   PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* network.interface.total.packets */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,17), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.total.errors */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,18), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.total.drops */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,19), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.total.mcasts */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,20), PM_TYPE_U64, NET_DEV_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* network.interface.mtu */
+ { NULL,
+   { PMDA_PMID(CLUSTER_NET_DEV,21), PM_TYPE_U32, NET_DEV_INDOM, PM_SEM_INSTANT,
+   /* byte scale belongs in the scaleSpace (4th) slot of PMDA_PMUNITS;
+    * the old (1,0,0,0,PM_SPACE_BYTE,0) form put it in scaleTime and only
+    * worked because PM_SPACE_BYTE happens to be 0 */
+   PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* network.interface.speed */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,22), PM_TYPE_FLOAT, NET_DEV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,-1,0,PM_SPACE_MBYTE,PM_TIME_SEC,0) }, },
+
+/* network.interface.baudrate */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,23), PM_TYPE_U32, NET_DEV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,-1,0,PM_SPACE_BYTE,PM_TIME_SEC,0) }, },
+
+/* network.interface.duplex */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,24), PM_TYPE_U32, NET_DEV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* network.interface.up */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,25), PM_TYPE_U32, NET_DEV_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* network.interface.running */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,26), PM_TYPE_U32, NET_DEV_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* hinv.ninterface */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_DEV,27), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* network.interface.inet_addr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_ADDR,0), PM_TYPE_STRING, NET_ADDR_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* network.interface.ipv6_addr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_ADDR,1), PM_TYPE_STRING, NET_ADDR_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* network.interface.ipv6_scope */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_ADDR,2), PM_TYPE_STRING, NET_ADDR_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* network.interface.hw_addr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_ADDR,3), PM_TYPE_STRING, NET_ADDR_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/*
+ * filesys cluster
+ */
+
+/* hinv.nmounts */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* filesys.capacity */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,1), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* filesys.used */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,2), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* filesys.free */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,3), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* filesys.maxfiles */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,4), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* filesys.usedfiles */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,5), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* filesys.freefiles */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,6), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* filesys.mountdir */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,7), PM_TYPE_STRING, FILESYS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* filesys.full */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,8), PM_TYPE_DOUBLE, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* filesys.blocksize */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,9), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) } },
+
+/* filesys.avail */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,10), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* filesys.readonly */
+ { NULL,
+ { PMDA_PMID(CLUSTER_FILESYS,11), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/*
+ * tmpfs filesystem cluster
+ */
+
+/* tmpfs.capacity */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,1), PM_TYPE_U64, TMPFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* tmpfs.used */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,2), PM_TYPE_U64, TMPFS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* tmpfs.free */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,3), PM_TYPE_U64, TMPFS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* tmpfs.maxfiles */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,4), PM_TYPE_U32, TMPFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* tmpfs.usedfiles */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,5), PM_TYPE_U32, TMPFS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* tmpfs.freefiles */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,6), PM_TYPE_U32, TMPFS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* tmpfs.full */
+ { NULL,
+ { PMDA_PMID(CLUSTER_TMPFS,7), PM_TYPE_DOUBLE, TMPFS_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/*
+ * swapdev cluster
+ */
+
+/* swapdev.free */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SWAPDEV,0), PM_TYPE_U32, SWAPDEV_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* swapdev.length */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SWAPDEV,1), PM_TYPE_U32, SWAPDEV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* swapdev.maxswap */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SWAPDEV,2), PM_TYPE_U32, SWAPDEV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* swapdev.vlength */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SWAPDEV,3), PM_TYPE_U32, SWAPDEV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* swapdev.priority */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SWAPDEV,4), PM_TYPE_32, SWAPDEV_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/*
+ * socket stat cluster
+ */
+
+/* network.sockstat.tcp.inuse */
+ { &proc_net_sockstat.tcp[_PM_SOCKSTAT_INUSE],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,0), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.sockstat.tcp.highest */
+ { &proc_net_sockstat.tcp[_PM_SOCKSTAT_HIGHEST],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,1), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.sockstat.tcp.util */
+ { &proc_net_sockstat.tcp[_PM_SOCKSTAT_UTIL],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,2), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* network.sockstat.udp.inuse */
+ { &proc_net_sockstat.udp[_PM_SOCKSTAT_INUSE],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,3), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.sockstat.udp.highest */
+ { &proc_net_sockstat.udp[_PM_SOCKSTAT_HIGHEST],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,4), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.sockstat.udp.util */
+ { &proc_net_sockstat.udp[_PM_SOCKSTAT_UTIL],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,5), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* network.sockstat.raw.inuse */
+ { &proc_net_sockstat.raw[_PM_SOCKSTAT_INUSE],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,6), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.sockstat.raw.highest */
+ { &proc_net_sockstat.raw[_PM_SOCKSTAT_HIGHEST],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,7), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.sockstat.raw.util */
+ { &proc_net_sockstat.raw[_PM_SOCKSTAT_UTIL],
+ { PMDA_PMID(CLUSTER_NET_SOCKSTAT,8), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/*
+ * nfs cluster
+ */
+
+/* nfs.client.calls */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs.client.reqs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,4), PM_TYPE_U32, NFS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs.server.calls */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,50), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs.server.reqs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,12), PM_TYPE_U32, NFS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs3.client.calls */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,60), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs3.client.reqs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,61), PM_TYPE_U32, NFS3_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs3.server.calls */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,62), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs3.server.reqs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,63), PM_TYPE_U32, NFS3_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs4.client.calls */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,64), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs4.client.reqs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,65), PM_TYPE_U32, NFS4_CLI_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs4.server.calls */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,66), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* nfs4.server.reqs */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NET_NFS,67), PM_TYPE_U32, NFS4_SVR_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.rpccnt */
+ { &proc_net_rpc.client.rpccnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,20), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.rpcretrans */
+ { &proc_net_rpc.client.rpcretrans,
+ { PMDA_PMID(CLUSTER_NET_NFS,21), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.rpcauthrefresh */
+ { &proc_net_rpc.client.rpcauthrefresh,
+ { PMDA_PMID(CLUSTER_NET_NFS,22), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.netcnt */
+ { &proc_net_rpc.client.netcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,24), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.netudpcnt */
+ { &proc_net_rpc.client.netudpcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,25), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.nettcpcnt */
+ { &proc_net_rpc.client.nettcpcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,26), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.client.nettcpconn */
+ { &proc_net_rpc.client.nettcpconn,
+ { PMDA_PMID(CLUSTER_NET_NFS,27), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rpccnt */
+ { &proc_net_rpc.server.rpccnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,30), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rpcerr */
+ { &proc_net_rpc.server.rpcerr,
+ { PMDA_PMID(CLUSTER_NET_NFS,31), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rpcbadfmt */
+ { &proc_net_rpc.server.rpcbadfmt,
+ { PMDA_PMID(CLUSTER_NET_NFS,32), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rpcbadauth */
+ { &proc_net_rpc.server.rpcbadauth,
+ { PMDA_PMID(CLUSTER_NET_NFS,33), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rpcbadclnt */
+ { &proc_net_rpc.server.rpcbadclnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,34), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rchits */
+ { &proc_net_rpc.server.rchits,
+ { PMDA_PMID(CLUSTER_NET_NFS,35), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rcmisses */
+ { &proc_net_rpc.server.rcmisses,
+ { PMDA_PMID(CLUSTER_NET_NFS,36), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.rcnocache */
+ { &proc_net_rpc.server.rcnocache,
+ { PMDA_PMID(CLUSTER_NET_NFS,37), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_cached */
+ { &proc_net_rpc.server.fh_cached,
+ { PMDA_PMID(CLUSTER_NET_NFS,38), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_valid */
+ { &proc_net_rpc.server.fh_valid,
+ { PMDA_PMID(CLUSTER_NET_NFS,39), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_fixup */
+ { &proc_net_rpc.server.fh_fixup,
+ { PMDA_PMID(CLUSTER_NET_NFS,40), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_lookup */
+ { &proc_net_rpc.server.fh_lookup,
+ { PMDA_PMID(CLUSTER_NET_NFS,41), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_stale */
+ { &proc_net_rpc.server.fh_stale,
+ { PMDA_PMID(CLUSTER_NET_NFS,42), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_concurrent */
+ { &proc_net_rpc.server.fh_concurrent,
+ { PMDA_PMID(CLUSTER_NET_NFS,43), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.netcnt */
+ { &proc_net_rpc.server.netcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,44), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.netudpcnt */
+ { &proc_net_rpc.server.netudpcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,45), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.nettcpcnt */
+ { &proc_net_rpc.server.nettcpcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,46), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.nettcpconn */
+ { &proc_net_rpc.server.nettcpconn,
+   /* was &proc_net_rpc.server.nettcpcnt (copy-paste from PMID 46 above),
+    * which made this metric report the TCP packet count instead of the
+    * TCP connection count */
+   { PMDA_PMID(CLUSTER_NET_NFS,47), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+   PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_anon */
+ { &proc_net_rpc.server.fh_anon,
+ { PMDA_PMID(CLUSTER_NET_NFS,51), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_nocache_dir */
+ { &proc_net_rpc.server.fh_nocache_dir,
+ { PMDA_PMID(CLUSTER_NET_NFS,52), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.fh_nocache_nondir */
+ { &proc_net_rpc.server.fh_nocache_nondir,
+ { PMDA_PMID(CLUSTER_NET_NFS,53), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.io_read */
+ { &proc_net_rpc.server.io_read,
+ { PMDA_PMID(CLUSTER_NET_NFS,54), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) } },
+
+/* rpc.server.io_write */
+ { &proc_net_rpc.server.io_write,
+ { PMDA_PMID(CLUSTER_NET_NFS,55), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) } },
+
+/* rpc.server.th_cnt */
+ { &proc_net_rpc.server.th_cnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,56), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* rpc.server.th_fullcnt */
+ { &proc_net_rpc.server.th_fullcnt,
+ { PMDA_PMID(CLUSTER_NET_NFS,57), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/*
+ * /proc/partitions cluster
+ */
+
+/* disk.partitions.read */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,0), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.partitions.write */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,1), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.partitions.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,2), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.partitions.blkread */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,3), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.partitions.blkwrite */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,4), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.partitions.blktotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,5), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* disk.partitions.read_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,6), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.partitions.write_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,7), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.partitions.total_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_PARTITIONS,8), PM_TYPE_U32, PARTITIONS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+
+/* disk.dev.read_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,38), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.dev.write_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,39), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.dev.total_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,40), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.all.read_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,41), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.all.write_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,42), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/* disk.all.total_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_STAT,43), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+/*
+ * kernel_uname cluster
+ */
+
+/* kernel.uname.release */
+ { kernel_uname.release,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 0), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* kernel.uname.version */
+ { kernel_uname.version,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 1), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* kernel.uname.sysname */
+ { kernel_uname.sysname,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 2), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* kernel.uname.machine */
+ { kernel_uname.machine,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 3), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* kernel.uname.nodename */
+ { kernel_uname.nodename,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 4), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* pmda.uname */
+ { NULL,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 5), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* pmda.version */
+ { NULL,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 6), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* kernel.uname.distro */
+ { NULL,
+ { PMDA_PMID(CLUSTER_KERNEL_UNAME, 7), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/*
+ * network snmp cluster
+ */
+
+/* network.ip.forwarding */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FORWARDING],
+ { PMDA_PMID(CLUSTER_NET_SNMP,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.defaultttl */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_DEFAULTTTL],
+ { PMDA_PMID(CLUSTER_NET_SNMP,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inreceives */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INRECEIVES],
+ { PMDA_PMID(CLUSTER_NET_SNMP,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inhdrerrors */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INHDRERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inaddrerrors */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INADDRERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.forwdatagrams */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FORWDATAGRAMS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inunknownprotos */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INUNKNOWNPROTOS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.indiscards */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INDISCARDS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,7), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.indelivers */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INDELIVERS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outrequests */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_OUTREQUESTS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,9), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outdiscards */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_OUTDISCARDS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,10), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outnoroutes */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_OUTNOROUTES],
+ { PMDA_PMID(CLUSTER_NET_SNMP,11), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.reasmtimeout */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMTIMEOUT],
+ { PMDA_PMID(CLUSTER_NET_SNMP,12), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.reasmreqds */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMREQDS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,13), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.reasmoks */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMOKS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.reasmfails */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMFAILS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.fragoks */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FRAGOKS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,16), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.fragfails */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FRAGFAILS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,17), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.fragcreates */
+ { &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FRAGCREATES],
+ { PMDA_PMID(CLUSTER_NET_SNMP,18), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+
+/* network.icmp.inmsgs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INMSGS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,20), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inerrors */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,21), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.indestunreachs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INDESTUNREACHS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,22), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.intimeexcds */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INTIMEEXCDS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,23), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inparmprobs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INPARMPROBS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,24), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.insrcquenchs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INSRCQUENCHS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,25), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inredirects */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INREDIRECTS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,26), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inechos */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INECHOS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,27), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inechoreps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INECHOREPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,28), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.intimestamps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INTIMESTAMPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,29), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.intimestampreps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INTIMESTAMPREPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,30), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inaddrmasks */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INADDRMASKS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,31), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.inaddrmaskreps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INADDRMASKREPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,32), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outmsgs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTMSGS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,33), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outerrors */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,34), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outdestunreachs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTDESTUNREACHS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,35), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outtimeexcds */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTTIMEEXCDS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,36), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outparmprobs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTPARMPROBS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,37), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outsrcquenchs */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTSRCQUENCHS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,38), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outredirects */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTREDIRECTS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,39), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outechos */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTECHOS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,40), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outechoreps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTECHOREPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,41), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outtimestamps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTTIMESTAMPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,42), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outtimestampreps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTTIMESTAMPREPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,43), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outaddrmasks */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTADDRMASKS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,44), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.outaddrmaskreps */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTADDRMASKREPS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,45), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmp.incsumerrors */
+ { &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INCSUMERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,46), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+
+/* network.tcp.rtoalgorithm */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RTOALGORITHM],
+ { PMDA_PMID(CLUSTER_NET_SNMP,50), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.rtomin */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RTOMIN],
+ { PMDA_PMID(CLUSTER_NET_SNMP,51), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.rtomax */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RTOMAX],
+ { PMDA_PMID(CLUSTER_NET_SNMP,52), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.maxconn */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_MAXCONN],
+ { PMDA_PMID(CLUSTER_NET_SNMP,53), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.established */
+ { &proc_net_tcp.stat[_PM_TCP_ESTABLISHED],
+ { PMDA_PMID(CLUSTER_NET_TCP, 1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.syn_sent */
+ { &proc_net_tcp.stat[_PM_TCP_SYN_SENT],
+ { PMDA_PMID(CLUSTER_NET_TCP, 2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.syn_recv */
+ { &proc_net_tcp.stat[_PM_TCP_SYN_RECV],
+ { PMDA_PMID(CLUSTER_NET_TCP, 3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.fin_wait1 */
+ { &proc_net_tcp.stat[_PM_TCP_FIN_WAIT1],
+ { PMDA_PMID(CLUSTER_NET_TCP, 4), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.fin_wait2 */
+ { &proc_net_tcp.stat[_PM_TCP_FIN_WAIT2],
+ { PMDA_PMID(CLUSTER_NET_TCP, 5), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.time_wait */
+ { &proc_net_tcp.stat[_PM_TCP_TIME_WAIT],
+ { PMDA_PMID(CLUSTER_NET_TCP, 6), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.close */
+ { &proc_net_tcp.stat[_PM_TCP_CLOSE],
+ { PMDA_PMID(CLUSTER_NET_TCP, 7), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.close_wait */
+ { &proc_net_tcp.stat[_PM_TCP_CLOSE_WAIT],
+ { PMDA_PMID(CLUSTER_NET_TCP, 8), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.last_ack */
+ { &proc_net_tcp.stat[_PM_TCP_LAST_ACK],
+ { PMDA_PMID(CLUSTER_NET_TCP, 9), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.listen */
+ { &proc_net_tcp.stat[_PM_TCP_LISTEN],
+ { PMDA_PMID(CLUSTER_NET_TCP, 10), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcpconn.closing */
+ { &proc_net_tcp.stat[_PM_TCP_CLOSING],
+ { PMDA_PMID(CLUSTER_NET_TCP, 11), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.activeopens */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_ACTIVEOPENS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,54), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.passiveopens */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_PASSIVEOPENS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,55), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.attemptfails */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_ATTEMPTFAILS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,56), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.estabresets */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_ESTABRESETS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,57), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.currestab */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_CURRESTAB],
+ { PMDA_PMID(CLUSTER_NET_SNMP,58), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.insegs */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_INSEGS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,59), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.outsegs */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_OUTSEGS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,60), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.retranssegs */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RETRANSSEGS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,61), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.inerrs */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_INERRS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,62), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.outrsts */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_OUTRSTS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,63), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.incsumerrors */
+ { &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_INCSUMERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,64), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.indatagrams */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_INDATAGRAMS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,70), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.noports */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_NOPORTS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,71), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.inerrors */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_INERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,72), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.outdatagrams */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_OUTDATAGRAMS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,74), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.recvbuferrors */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_RECVBUFERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,75), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.sndbuferrors */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_SNDBUFERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,76), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udp.incsumerrors */
+ { &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_INCSUMERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,83), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.indatagrams */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_INDATAGRAMS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,77), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.noports */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_NOPORTS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,78), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.inerrors */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_INERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,79), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.outdatagrams */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_OUTDATAGRAMS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,80), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.recvbuferrors */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_RECVBUFERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,81), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.sndbuferrors */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_SNDBUFERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,82), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.udplite.incsumerrors */
+ { &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_INCSUMERRORS],
+ { PMDA_PMID(CLUSTER_NET_SNMP,84), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmpmsg.intype */
+ { &_pm_proc_net_snmp.icmpmsg[_PM_SNMP_ICMPMSG_INTYPE],
+ { PMDA_PMID(CLUSTER_NET_SNMP,88), PM_TYPE_U64, ICMPMSG_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.icmpmsg.outtype */
+ { &_pm_proc_net_snmp.icmpmsg[_PM_SNMP_ICMPMSG_OUTTYPE],
+ { PMDA_PMID(CLUSTER_NET_SNMP,89), PM_TYPE_U64, ICMPMSG_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/*
+ * network netstat cluster
+ */
+
+/* network.ip.innoroutes */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INNOROUTES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.intruncatedpkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INTRUNCATEDPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inmcastpkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INMCASTPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outmcastpkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTMCASTPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inbcastpkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INBCASTPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outbcastpkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTBCASTPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inoctets */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INOCTETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outoctets */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTOCTETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,7), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inmcastoctets */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INMCASTOCTETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outmcastoctets */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTMCASTOCTETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,9), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.inbcastoctets */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INBCASTOCTETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,10), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.outbcastoctets */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTBCASTOCTETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,11), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.csumerrors */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_CSUMERRORS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,12), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.noectpkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_NOECTPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,13), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.ect1pkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_ECT1PKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.ect0pkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_ECT0PKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.ip.cepkts */
+ { &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_CEPKTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,16), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.syncookiessent */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SYNCOOKIESSENT],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,17), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.syncookiesrecv */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SYNCOOKIESRECV],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,18), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.syncookiesfailed */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SYNCOOKIESFAILED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,19), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.embryonicrsts */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_EMBRYONICRSTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,20), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.prunecalled */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PRUNECALLED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,21), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.rcvpruned */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_RCVPRUNED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,22), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.ofopruned */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_OFOPRUNED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,23), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.outofwindowicmps */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_OUTOFWINDOWICMPS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,24), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.lockdroppedicmps */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_LOCKDROPPEDICMPS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,25), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.arpfilter */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_ARPFILTER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,26), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.timewaited */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TIMEWAITED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,27), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.timewaitrecycled */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TIMEWAITRECYCLED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,28), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.timewaitkilled */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TIMEWAITKILLED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,29), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.pawspassiverejected */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PAWSPASSIVEREJECTED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,30), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.pawsactiverejected */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PAWSACTIVEREJECTED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,31), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.pawsestabrejected */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PAWSESTABREJECTED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,32), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.delayedacks */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_DELAYEDACKS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,33), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.delayedacklocked */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_DELAYEDACKLOCKED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,34), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.delayedacklost */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_DELAYEDACKLOST],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,35), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.listenoverflows */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_LISTENOVERFLOWS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,36), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.listendrops */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_LISTENDROPS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,37), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.prequeued */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPREQUEUED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,38), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.directcopyfrombacklog */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDIRECTCOPYFROMBACKLOG],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,39), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.directcopyfromprequeue */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDIRECTCOPYFROMPREQUEUE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,40), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.prequeuedropped */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPREQUEUEDROPPED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,41), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.hphits */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPHPHITS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,42), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.hphitstouser */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPHPHITSTOUSER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,43), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.pureacks */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPUREACKS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,44), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.hpacks */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPHPACKS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,45), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.renorecovery */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENORECOVERY],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,46), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackrecovery */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKRECOVERY],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,47), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackreneging */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKRENEGING],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,48), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fackreorder */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFACKREORDER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,49), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackreorder */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKREORDER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,50), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.renoreorder */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENOREORDER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,51), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.tsreorder */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTSREORDER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,52), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fullundo */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFULLUNDO],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,53), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.partialundo */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPARTIALUNDO],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,54), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackundo */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKUNDO],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,55), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.lossundo */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSUNDO],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,56), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.lostretransmit */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSTRETRANSMIT],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,57), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.renofailures */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENOFAILURES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,58), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackfailures */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKFAILURES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,59), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.lossfailures */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSFAILURES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,60), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastretrans */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTRETRANS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,61), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.forwardretrans */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFORWARDRETRANS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,62), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.slowstartretrans */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSLOWSTARTRETRANS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,63), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.timeouts */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTIMEOUTS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,64), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.lossprobes */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSPROBES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,65), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.lossproberecovery */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSPROBERECOVERY],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,66), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.renorecoveryfail */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENORECOVERYFAIL],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,67), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackrecoveryfail */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKRECOVERYFAIL],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,68), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.schedulerfailed */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSCHEDULERFAILED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,69), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.rcvcollapsed */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRCVCOLLAPSED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,70), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackoldsent */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKOLDSENT],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,71), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackofosent */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKOFOSENT],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,72), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackrecv */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKRECV],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,73), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackoforecv */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKOFORECV],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,74), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.abortondata */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONDATA],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,75), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.abortonclose */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONCLOSE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,76), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.abortonmemory */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONMEMORY],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,77), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.abortontimeout */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONTIMEOUT],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,78), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.abortonlinger */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONLINGER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,79), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.abortfailed */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTFAILED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,80), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.memorypressures */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMEMORYPRESSURES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,81), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackdiscard */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKDISCARD],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,82), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackignoredold */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKIGNOREDOLD],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,83), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.dsackignorednoundo */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKIGNOREDNOUNDO],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,84), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.spuriousrtos */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSPURIOUSRTOS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,85), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.md5notfound */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMD5NOTFOUND],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,86), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.md5unexpected */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMD5UNEXPECTED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,87), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackshifted */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SACKSHIFTED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,88), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackmerged */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SACKMERGED],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,89), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.sackshiftfallback */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SACKSHIFTFALLBACK],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,90), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.backlogdrop */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPBACKLOGDROP],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,91), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.minttldrop */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMINTTLDROP],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,92), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.deferacceptdrop */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDEFERACCEPTDROP],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,93), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.iprpfilter */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_IPRPFILTER],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,94), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.timewaitoverflow */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTIMEWAITOVERFLOW],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,95), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.reqqfulldocookies */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPREQQFULLDOCOOKIES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,96), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.reqqfulldrop */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPREQQFULLDROP],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,97), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.retransfail */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRETRANSFAIL],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,98), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.rcvcoalesce */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRCVCOALESCE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,99), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.ofoqueue */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPOFOQUEUE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,100), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.ofodrop */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPOFODROP],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,101), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.ofomerge */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPOFOMERGE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,102), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.challengeack */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPCHALLENGEACK],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,103), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.synchallenge */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSYNCHALLENGE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,104), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastopenactive */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENACTIVE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,105), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastopenactivefail */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENACTIVEFAIL],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,106), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastopenpassive */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENPASSIVE],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,107), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastopenpassivefail */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENPASSIVEFAIL],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,108), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastopenlistenoverflow */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENLISTENOVERFLOW],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,109), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fastopencookiereqd */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENCOOKIEREQD],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,110), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.spuriousrtxhostqueues */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSPURIOUS_RTX_HOSTQUEUES],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,111), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.busypollrxpackets */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_BUSYPOLLRXPACKETS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,112), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.autocorking */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPAUTOCORKING],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,113), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.fromzerowindowadv */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFROMZEROWINDOWADV],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,114), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.tozerowindowadv */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTOZEROWINDOWADV],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,115), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.wantzerowindowadv */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPWANTZEROWINDOWADV],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,116), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.synretrans */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSYNRETRANS],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,117), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* network.tcp.origdatasent */
+ { &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPORIGDATASENT],
+ { PMDA_PMID(CLUSTER_NET_NETSTAT,118), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } },
+
+/* hinv.map.scsi */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SCSI,0), PM_TYPE_STRING, SCSI_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* hinv.map.lvname */
+ { NULL,
+ { PMDA_PMID(CLUSTER_LV,0), PM_TYPE_STRING, LV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/* hinv.nlv */
+ { NULL,
+ { PMDA_PMID(CLUSTER_LV,1), PM_TYPE_U32, LV_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+/*
+ * /proc/cpuinfo cluster (cpu indom)
+ */
+
+/* hinv.cpu.clock */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 0), PM_TYPE_FLOAT, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,1,0,-6,0) } },
+
+/* hinv.cpu.vendor */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 1), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.cpu.model */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 2), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.cpu.stepping */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 3), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.cpu.cache */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 4), PM_TYPE_U32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,PM_SPACE_KBYTE,0,0) } },
+
+/* hinv.cpu.bogomips */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 5), PM_TYPE_FLOAT, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.map.cpu_num */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 6), PM_TYPE_U32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.machine */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 7), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.map.cpu_node */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 8), PM_TYPE_U32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.cpu.model_name */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 9), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.cpu.flags */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 10), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) } },
+
+/* hinv.cpu.cache_alignment */
+ { NULL,
+ { PMDA_PMID(CLUSTER_CPUINFO, 11), PM_TYPE_U32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,PM_SPACE_BYTE,0,0) } },
+
+/*
+ * semaphore limits cluster
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+
+/* ipc.sem.max_semmap */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_semid */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_sem */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.num_undo */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_perid */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 4), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_ops */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 5), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_undoent */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 6), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.sz_semundo */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 7), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_semval */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 8), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.sem.max_exit */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SEM_LIMITS, 9), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/*
+ * message limits cluster
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+
+/* ipc.msg.sz_pool */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0, PM_SPACE_KBYTE,0,0)}},
+
+/* ipc.msg.mapent */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.msg.max_msgsz */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.msg.max_defmsgq */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.msg.max_msgqid */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 4), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.msg.max_msgseg */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 5), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0, 0,0,0)}},
+
+/* ipc.msg.max_smsghdr */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 6), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.msg.max_seg */
+ { NULL,
+ { PMDA_PMID(CLUSTER_MSG_LIMITS, 7), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/*
+ * shared memory limits cluster
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+
+/* ipc.shm.max_segsz */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SHM_LIMITS, 0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0)}},
+
+/* ipc.shm.min_segsz */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SHM_LIMITS, 1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0)}},
+
+/* ipc.shm.max_seg */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SHM_LIMITS, 2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.shm.max_segproc */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SHM_LIMITS, 3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/* ipc.shm.max_shmsys */
+ { NULL,
+ { PMDA_PMID(CLUSTER_SHM_LIMITS, 4), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/*
+ * number of users cluster
+ */
+
+/* kernel.all.nusers */
+ { NULL,
+ { PMDA_PMID(CLUSTER_NUSERS, 0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0)}},
+
+/*
+ * /proc/sys/fs vfs cluster
+ */
+
+/* vfs.files */
+ { &proc_sys_fs.fs_files_count,
+ { PMDA_PMID(CLUSTER_VFS,0), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+ { &proc_sys_fs.fs_files_free,
+ { PMDA_PMID(CLUSTER_VFS,1), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+ { &proc_sys_fs.fs_files_max,
+ { PMDA_PMID(CLUSTER_VFS,2), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+ { &proc_sys_fs.fs_inodes_count,
+ { PMDA_PMID(CLUSTER_VFS,3), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+ { &proc_sys_fs.fs_inodes_free,
+ { PMDA_PMID(CLUSTER_VFS,4), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+ { &proc_sys_fs.fs_dentry_count,
+ { PMDA_PMID(CLUSTER_VFS,5), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+ { &proc_sys_fs.fs_dentry_free,
+ { PMDA_PMID(CLUSTER_VFS,6), PM_TYPE_32, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /*
+ * mem.vmstat cluster
+ */
+
+ /* mem.vmstat.nr_dirty */
+ { &_pm_proc_vmstat.nr_dirty,
+ {PMDA_PMID(28,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_writeback */
+ { &_pm_proc_vmstat.nr_writeback,
+ {PMDA_PMID(28,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_unstable */
+ { &_pm_proc_vmstat.nr_unstable,
+ {PMDA_PMID(28,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_page_table_pages */
+ { &_pm_proc_vmstat.nr_page_table_pages,
+ {PMDA_PMID(28,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_mapped */
+ { &_pm_proc_vmstat.nr_mapped,
+ {PMDA_PMID(28,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_slab */
+ { &_pm_proc_vmstat.nr_slab,
+ {PMDA_PMID(28,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.pgpgin */
+ { &_pm_proc_vmstat.pgpgin,
+ {PMDA_PMID(28,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgpgout */
+ { &_pm_proc_vmstat.pgpgout,
+ {PMDA_PMID(28,7), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pswpin */
+ { &_pm_proc_vmstat.pswpin,
+ {PMDA_PMID(28,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pswpout */
+ { &_pm_proc_vmstat.pswpout,
+ {PMDA_PMID(28,9), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgalloc_high */
+ { &_pm_proc_vmstat.pgalloc_high,
+ {PMDA_PMID(28,10), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgalloc_normal */
+ { &_pm_proc_vmstat.pgalloc_normal,
+ {PMDA_PMID(28,11), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgalloc_dma */
+ { &_pm_proc_vmstat.pgalloc_dma,
+ {PMDA_PMID(28,12), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgfree */
+ { &_pm_proc_vmstat.pgfree,
+ {PMDA_PMID(28,13), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgactivate */
+ { &_pm_proc_vmstat.pgactivate,
+ {PMDA_PMID(28,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgdeactivate */
+ { &_pm_proc_vmstat.pgdeactivate,
+ {PMDA_PMID(28,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgfault */
+ { &_pm_proc_vmstat.pgfault,
+ {PMDA_PMID(28,16), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgmajfault */
+ { &_pm_proc_vmstat.pgmajfault,
+ {PMDA_PMID(28,17), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgrefill_high */
+ { &_pm_proc_vmstat.pgrefill_high,
+ {PMDA_PMID(28,18), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgrefill_normal */
+ { &_pm_proc_vmstat.pgrefill_normal,
+ {PMDA_PMID(28,19), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgrefill_dma */
+ { &_pm_proc_vmstat.pgrefill_dma,
+ {PMDA_PMID(28,20), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgsteal_high */
+ { &_pm_proc_vmstat.pgsteal_high,
+ {PMDA_PMID(28,21), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgsteal_normal */
+ { &_pm_proc_vmstat.pgsteal_normal,
+ {PMDA_PMID(28,22), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgsteal_dma */
+ { &_pm_proc_vmstat.pgsteal_dma,
+ {PMDA_PMID(28,23), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_kswapd_high */
+ { &_pm_proc_vmstat.pgscan_kswapd_high,
+ {PMDA_PMID(28,24), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_kswapd_normal */
+ { &_pm_proc_vmstat.pgscan_kswapd_normal,
+ {PMDA_PMID(28,25), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_kswapd_dma */
+ { &_pm_proc_vmstat.pgscan_kswapd_dma,
+ {PMDA_PMID(28,26), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_direct_high */
+ { &_pm_proc_vmstat.pgscan_direct_high,
+ {PMDA_PMID(28,27), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_direct_normal */
+ { &_pm_proc_vmstat.pgscan_direct_normal,
+ {PMDA_PMID(28,28), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_direct_dma */
+ { &_pm_proc_vmstat.pgscan_direct_dma,
+ {PMDA_PMID(28,29), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pginodesteal */
+ { &_pm_proc_vmstat.pginodesteal,
+ {PMDA_PMID(28,30), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.slabs_scanned */
+ { &_pm_proc_vmstat.slabs_scanned,
+ {PMDA_PMID(28,31), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.kswapd_steal */
+ { &_pm_proc_vmstat.kswapd_steal,
+ {PMDA_PMID(28,32), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.kswapd_inodesteal */
+ { &_pm_proc_vmstat.kswapd_inodesteal,
+ {PMDA_PMID(28,33), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pageoutrun */
+ { &_pm_proc_vmstat.pageoutrun,
+ {PMDA_PMID(28,34), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.allocstall */
+ { &_pm_proc_vmstat.allocstall,
+ {PMDA_PMID(28,35), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgrotated */
+ { &_pm_proc_vmstat.pgrotated,
+ {PMDA_PMID(28,36), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_slab_reclaimable */
+ { &_pm_proc_vmstat.nr_slab_reclaimable,
+ {PMDA_PMID(28,37), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_slab_unreclaimable */
+ { &_pm_proc_vmstat.nr_slab_unreclaimable,
+ {PMDA_PMID(28,38), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_anon_pages */
+ { &_pm_proc_vmstat.nr_anon_pages,
+ {PMDA_PMID(28,39), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_bounce */
+ { &_pm_proc_vmstat.nr_bounce,
+ {PMDA_PMID(28,40), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_file_pages */
+ { &_pm_proc_vmstat.nr_file_pages,
+ {PMDA_PMID(28,41), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* mem.vmstat.nr_vmscan_write */
+ { &_pm_proc_vmstat.nr_vmscan_write,
+ {PMDA_PMID(28,42), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.htlb_buddy_alloc_fail */
+ { &_pm_proc_vmstat.htlb_buddy_alloc_fail,
+ {PMDA_PMID(28,43), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.htlb_buddy_alloc_success */
+ { &_pm_proc_vmstat.htlb_buddy_alloc_success,
+ {PMDA_PMID(28,44), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_active_anon */
+ { &_pm_proc_vmstat.nr_active_anon,
+ {PMDA_PMID(28,45), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_active_file */
+ { &_pm_proc_vmstat.nr_active_file,
+ {PMDA_PMID(28,46), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_free_pages */
+ { &_pm_proc_vmstat.nr_free_pages,
+ {PMDA_PMID(28,47), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_inactive_anon */
+ { &_pm_proc_vmstat.nr_inactive_anon,
+ {PMDA_PMID(28,48), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_inactive_file */
+ { &_pm_proc_vmstat.nr_inactive_file,
+ {PMDA_PMID(28,49), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_isolated_anon */
+ { &_pm_proc_vmstat.nr_isolated_anon,
+ {PMDA_PMID(28,50), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_isolated_file */
+ { &_pm_proc_vmstat.nr_isolated_file,
+ {PMDA_PMID(28,51), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_kernel_stack */
+ { &_pm_proc_vmstat.nr_kernel_stack,
+ {PMDA_PMID(28,52), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_mlock */
+ { &_pm_proc_vmstat.nr_mlock,
+ {PMDA_PMID(28,53), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_shmem */
+ { &_pm_proc_vmstat.nr_shmem,
+ {PMDA_PMID(28,54), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_unevictable */
+ { &_pm_proc_vmstat.nr_unevictable,
+ {PMDA_PMID(28,55), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_writeback_temp */
+ { &_pm_proc_vmstat.nr_writeback_temp,
+ {PMDA_PMID(28,56), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.compact_blocks_moved */
+ { &_pm_proc_vmstat.compact_blocks_moved,
+ {PMDA_PMID(28,57), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.compact_fail */
+ { &_pm_proc_vmstat.compact_fail,
+ {PMDA_PMID(28,58), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.compact_pagemigrate_failed */
+ { &_pm_proc_vmstat.compact_pagemigrate_failed,
+ {PMDA_PMID(28,59), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.compact_pages_moved */
+ { &_pm_proc_vmstat.compact_pages_moved,
+ {PMDA_PMID(28,60), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.compact_stall */
+ { &_pm_proc_vmstat.compact_stall,
+ {PMDA_PMID(28,61), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.compact_success */
+ { &_pm_proc_vmstat.compact_success,
+ {PMDA_PMID(28,62), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgalloc_dma32 */
+ { &_pm_proc_vmstat.pgalloc_dma32,
+ {PMDA_PMID(28,63), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgalloc_movable */
+ { &_pm_proc_vmstat.pgalloc_movable,
+ {PMDA_PMID(28,64), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgrefill_dma32 */
+ { &_pm_proc_vmstat.pgrefill_dma32,
+ {PMDA_PMID(28,65), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgrefill_movable */
+ { &_pm_proc_vmstat.pgrefill_movable,
+ {PMDA_PMID(28,66), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_direct_dma32 */
+ { &_pm_proc_vmstat.pgscan_direct_dma32,
+ {PMDA_PMID(28,67), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_direct_movable */
+ { &_pm_proc_vmstat.pgscan_direct_movable,
+ {PMDA_PMID(28,68), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_kswapd_dma32 */
+ { &_pm_proc_vmstat.pgscan_kswapd_dma32,
+ {PMDA_PMID(28,69), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgscan_kswapd_movable */
+ { &_pm_proc_vmstat.pgscan_kswapd_movable,
+ {PMDA_PMID(28,70), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgsteal_dma32 */
+ { &_pm_proc_vmstat.pgsteal_dma32,
+ {PMDA_PMID(28,71), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.pgsteal_movable */
+ { &_pm_proc_vmstat.pgsteal_movable,
+ {PMDA_PMID(28,72), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.thp_fault_alloc */
+ { &_pm_proc_vmstat.thp_fault_alloc,
+ {PMDA_PMID(28,73), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.thp_fault_fallback */
+ { &_pm_proc_vmstat.thp_fault_fallback,
+ {PMDA_PMID(28,74), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.thp_collapse_alloc */
+ { &_pm_proc_vmstat.thp_collapse_alloc,
+ {PMDA_PMID(28,75), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.thp_collapse_alloc_failed */
+ { &_pm_proc_vmstat.thp_collapse_alloc_failed,
+ {PMDA_PMID(28,76), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.thp_split */
+ { &_pm_proc_vmstat.thp_split,
+ {PMDA_PMID(28,77), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_cleared */
+ { &_pm_proc_vmstat.unevictable_pgs_cleared,
+ {PMDA_PMID(28,78), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_culled */
+ { &_pm_proc_vmstat.unevictable_pgs_culled,
+ {PMDA_PMID(28,79), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_mlocked */
+ { &_pm_proc_vmstat.unevictable_pgs_mlocked,
+ {PMDA_PMID(28,80), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_mlockfreed */
+ { &_pm_proc_vmstat.unevictable_pgs_mlockfreed,
+ {PMDA_PMID(28,81), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_munlocked */
+ { &_pm_proc_vmstat.unevictable_pgs_munlocked,
+ {PMDA_PMID(28,82), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_rescued */
+ { &_pm_proc_vmstat.unevictable_pgs_rescued,
+ {PMDA_PMID(28,83), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_scanned */
+ { &_pm_proc_vmstat.unevictable_pgs_scanned,
+ {PMDA_PMID(28,84), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.unevictable_pgs_stranded */
+ { &_pm_proc_vmstat.unevictable_pgs_stranded,
+ {PMDA_PMID(28,85), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.zone_reclaim_failed */
+ { &_pm_proc_vmstat.zone_reclaim_failed,
+ {PMDA_PMID(28,86), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.kswapd_low_wmark_hit_quickly */
+ { &_pm_proc_vmstat.kswapd_low_wmark_hit_quickly,
+ {PMDA_PMID(28,87), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.kswapd_high_wmark_hit_quickly */
+ { &_pm_proc_vmstat.kswapd_high_wmark_hit_quickly,
+ {PMDA_PMID(28,88), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.kswapd_skip_congestion_wait */
+ { &_pm_proc_vmstat.kswapd_skip_congestion_wait,
+ {PMDA_PMID(28,89), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_anon_transparent_hugepages */
+ { &_pm_proc_vmstat.nr_anon_transparent_hugepages,
+ {PMDA_PMID(28,90), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_dirtied */
+ { &_pm_proc_vmstat.nr_dirtied,
+ {PMDA_PMID(28,91), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_dirty_background_threshold */
+ { &_pm_proc_vmstat.nr_dirty_background_threshold,
+ {PMDA_PMID(28,92), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_dirty_threshold */
+ { &_pm_proc_vmstat.nr_dirty_threshold,
+ {PMDA_PMID(28,93), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.nr_written */
+ { &_pm_proc_vmstat.nr_written,
+ {PMDA_PMID(28,94), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.numa_foreign */
+ { &_pm_proc_vmstat.numa_foreign,
+ {PMDA_PMID(28,95), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.numa_hit */
+ { &_pm_proc_vmstat.numa_hit,
+ {PMDA_PMID(28,96), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.numa_interleave */
+ { &_pm_proc_vmstat.numa_interleave,
+ {PMDA_PMID(28,97), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.numa_local */
+ { &_pm_proc_vmstat.numa_local,
+ {PMDA_PMID(28,98), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.numa_miss */
+ { &_pm_proc_vmstat.numa_miss,
+ {PMDA_PMID(28,99), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* mem.vmstat.numa_other */
+ { &_pm_proc_vmstat.numa_other,
+ {PMDA_PMID(28,100), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/*
+ * sysfs_kernel cluster
+ */
+ /* sysfs.kernel.uevent_seqnum */
+ { &sysfs_kernel.uevent_seqnum,
+ { PMDA_PMID(CLUSTER_SYSFS_KERNEL,0), PM_TYPE_U64, PM_INDOM_NULL,
+ PM_SEM_COUNTER, PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/*
+ * /proc/interrupts cluster
+ */
+ /* kernel.all.interrupts.errors */
+ { &irq_err_count,
+ { PMDA_PMID(CLUSTER_INTERRUPTS, 3), PM_TYPE_U32, PM_INDOM_NULL,
+ PM_SEM_COUNTER, PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* kernel.percpu.interrupts.line[<N>] */
+ { NULL, { PMDA_PMID(CLUSTER_INTERRUPT_LINES, 0), PM_TYPE_U32,
+ CPU_INDOM, PM_SEM_COUNTER, PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* kernel.percpu.interrupts.[<other>] */
+ { NULL, { PMDA_PMID(CLUSTER_INTERRUPT_OTHER, 0), PM_TYPE_U32,
+ CPU_INDOM, PM_SEM_COUNTER, PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/*
+ * disk.dm cluster
+ */
+ /* disk.dm.read */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,0), KERNEL_ULONG, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.write */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,1), KERNEL_ULONG, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.total */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,2), KERNEL_ULONG, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.blkread */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,3), PM_TYPE_U64, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.blkwrite */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,4), PM_TYPE_U64, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.blktotal */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,5), PM_TYPE_U64, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.read_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,6), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+ /* disk.dm.write_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,7), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+ /* disk.dm.total_bytes */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,8), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+
+ /* disk.dm.read_merge */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,9), KERNEL_ULONG, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.write_merge */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,10), KERNEL_ULONG, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+ /* disk.dm.avactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,11), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+ /* disk.dm.aveq */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,12), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+ /* hinv.map.dmname */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,13), PM_TYPE_STRING, DM_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0,0,0,0,0,0) }, },
+
+ /* disk.dm.read_rawactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,14), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+
+ /* disk.dm.write_rawactive */
+ { NULL,
+ { PMDA_PMID(CLUSTER_DM,15), PM_TYPE_U32, DM_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0,1,0,0,PM_TIME_MSEC,0) }, },
+};
+
char *linux_statspath = "";	/* optional path prefix for all stats files */

/*
 * Open (read-only) one of the kernel statistics files, with the
 * optional linux_statspath prefix prepended to the given path.
 * The fully-constructed pathname is left in the caller-supplied
 * buffer, always NUL-terminated.  Returns the stdio stream, or
 * NULL if the file cannot be opened (errno set by fopen).
 */
FILE *
linux_statsfile(const char *path, char *buffer, int size)
{
    char	*fullpath = buffer;

    snprintf(fullpath, size, "%s%s", linux_statspath, path);
    fullpath[size-1] = '\0';	/* paranoia: snprintf NUL-terminates anyway */
    return fopen(fullpath, "r");
}
+
+/*
+ * Bring the in-memory copies of the kernel statistics up to date,
+ * for just those clusters flagged in need_refresh[] (an array
+ * indexed by cluster number; non-zero means some requested metric
+ * or instance domain lives in that cluster).  Only the refresh
+ * routines actually needed are invoked, keeping fetch latency low.
+ */
+static void
+linux_refresh(pmdaExt *pmda, int *need_refresh)
+{
+ int need_refresh_mtab = 0;
+
+ if (need_refresh[CLUSTER_PARTITIONS])
+ refresh_proc_partitions(INDOM(DISK_INDOM), INDOM(PARTITIONS_INDOM), INDOM(DM_INDOM));
+
+ if (need_refresh[CLUSTER_STAT])
+ refresh_proc_stat(&proc_cpuinfo, &proc_stat);
+
+ if (need_refresh[CLUSTER_CPUINFO])
+ refresh_proc_cpuinfo(&proc_cpuinfo);
+
+ if (need_refresh[CLUSTER_MEMINFO])
+ refresh_proc_meminfo(&proc_meminfo);
+
+ if (need_refresh[CLUSTER_NUMA_MEMINFO])
+ refresh_numa_meminfo(&numa_meminfo, &proc_cpuinfo, &proc_stat);
+
+ if (need_refresh[CLUSTER_LOADAVG])
+ refresh_proc_loadavg(&proc_loadavg);
+
+ if (need_refresh[CLUSTER_NET_DEV])
+ refresh_proc_net_dev(INDOM(NET_DEV_INDOM));
+
+ if (need_refresh[CLUSTER_NET_ADDR])
+ refresh_net_dev_addr(INDOM(NET_ADDR_INDOM));
+
+ /* one routine refreshes both the filesys and tmpfs instance domains */
+ if (need_refresh[CLUSTER_FILESYS] || need_refresh[CLUSTER_TMPFS])
+ refresh_filesys(INDOM(FILESYS_INDOM), INDOM(TMPFS_INDOM));
+
+ /*
+ * The interrupt refresh may discover new interrupt lines; its
+ * return value is accumulated so we know whether the dynamic
+ * metric table needs to be republished below.
+ */
+ if (need_refresh[CLUSTER_INTERRUPTS] ||
+ need_refresh[CLUSTER_INTERRUPT_LINES] ||
+ need_refresh[CLUSTER_INTERRUPT_OTHER])
+ need_refresh_mtab |= refresh_interrupt_values();
+
+ if (need_refresh[CLUSTER_SWAPDEV])
+ refresh_swapdev(INDOM(SWAPDEV_INDOM));
+
+ if (need_refresh[CLUSTER_NET_NFS])
+ refresh_proc_net_rpc(&proc_net_rpc);
+
+ if (need_refresh[CLUSTER_NET_SOCKSTAT])
+ refresh_proc_net_sockstat(&proc_net_sockstat);
+
+ if (need_refresh[CLUSTER_KERNEL_UNAME])
+ uname(&kernel_uname);
+
+ if (need_refresh[CLUSTER_NET_SNMP])
+ refresh_proc_net_snmp(&_pm_proc_net_snmp);
+
+ if (need_refresh[CLUSTER_SCSI])
+ refresh_proc_scsi(&proc_scsi);
+
+ if (need_refresh[CLUSTER_LV])
+ refresh_dev_mapper(&dev_mapper);
+
+ if (need_refresh[CLUSTER_NET_TCP])
+ refresh_proc_net_tcp(&proc_net_tcp);
+
+ if (need_refresh[CLUSTER_NET_NETSTAT])
+ refresh_proc_net_netstat(&_pm_proc_net_netstat);
+
+ if (need_refresh[CLUSTER_SLAB])
+ refresh_proc_slabinfo(&proc_slabinfo);
+
+ if (need_refresh[CLUSTER_SEM_LIMITS])
+ refresh_sem_limits(&sem_limits);
+
+ if (need_refresh[CLUSTER_MSG_LIMITS])
+ refresh_msg_limits(&msg_limits);
+
+ if (need_refresh[CLUSTER_SHM_LIMITS])
+ refresh_shm_limits(&shm_limits);
+
+ if (need_refresh[CLUSTER_UPTIME])
+ refresh_proc_uptime(&proc_uptime);
+
+ if (need_refresh[CLUSTER_VFS])
+ refresh_proc_sys_fs(&proc_sys_fs);
+
+ if (need_refresh[CLUSTER_VMSTAT])
+ refresh_proc_vmstat(&_pm_proc_vmstat);
+
+ if (need_refresh[CLUSTER_SYSFS_KERNEL])
+ refresh_sysfs_kernel(&sysfs_kernel);
+
+ /* interrupt refresh found changes - republish the dynamic metric table */
+ if (need_refresh_mtab)
+ pmdaDynamicMetricTable(pmda);
+}
+
+static int
+linux_instance(pmInDom indom, int inst, char *name, __pmInResult **result, pmdaExt *pmda)
+{
+ __pmInDom_int *indomp = (__pmInDom_int *)&indom;
+ int need_refresh[NUM_CLUSTERS];
+
+ memset(need_refresh, 0, sizeof(need_refresh));
+ switch (indomp->serial) {
+ case DISK_INDOM:
+ case PARTITIONS_INDOM:
+ case DM_INDOM:
+ need_refresh[CLUSTER_PARTITIONS]++;
+ break;
+ case CPU_INDOM:
+ need_refresh[CLUSTER_STAT]++;
+ break;
+ case NODE_INDOM:
+ need_refresh[CLUSTER_NUMA_MEMINFO]++;
+ break;
+ case LOADAVG_INDOM:
+ need_refresh[CLUSTER_LOADAVG]++;
+ break;
+ case NET_DEV_INDOM:
+ need_refresh[CLUSTER_NET_DEV]++;
+ break;
+ case FILESYS_INDOM:
+ need_refresh[CLUSTER_FILESYS]++;
+ break;
+ case TMPFS_INDOM:
+ need_refresh[CLUSTER_TMPFS]++;
+ break;
+ case SWAPDEV_INDOM:
+ need_refresh[CLUSTER_SWAPDEV]++;
+ break;
+ case NFS_INDOM:
+ case NFS3_INDOM:
+ case NFS4_CLI_INDOM:
+ case NFS4_SVR_INDOM:
+ need_refresh[CLUSTER_NET_NFS]++;
+ break;
+ case SCSI_INDOM:
+ need_refresh[CLUSTER_SCSI]++;
+ break;
+ case LV_INDOM:
+ need_refresh[CLUSTER_LV]++;
+ break;
+ case SLAB_INDOM:
+ need_refresh[CLUSTER_SLAB]++;
+ break;
+ case ICMPMSG_INDOM:
+ need_refresh[CLUSTER_NET_SNMP]++;
+ break;
+ /* no default label : pmdaInstance will pick up errors */
+ }
+
+ linux_refresh(pmda, need_refresh);
+ return pmdaInstance(indom, inst, name, result, pmda);
+}
+
+/*
+ * callback provided to pmdaFetch
+ */
+
+static int
+linux_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
+{
+ __pmID_int *idp = (__pmID_int *)&(mdesc->m_desc.pmid);
+ int i;
+ int sts;
+ long sl;
+ struct filesys *fs;
+ net_addr_t *addrp;
+ net_interface_t *netip;
+
+ if (mdesc->m_user != NULL) {
+ /*
+ * The metric value is extracted directly via the address specified
+ * in metrictab. Note: not all metrics support this - those that
+ * don't have NULL for the m_user field in their respective
+ * metrictab slot.
+ */
+ if (idp->cluster == CLUSTER_VMSTAT) {
+ if (!(_pm_have_proc_vmstat) ||
+ *(__uint64_t *)mdesc->m_user == (__uint64_t)-1)
+ return 0; /* no value available on this kernel */
+ }
+ else
+ if (idp->cluster == CLUSTER_NET_SNMP) {
+ __uint64_t value;
+
+ /* network.icmpmsg has an indom - deal with it now */
+ if (idp->item == 88 || idp->item == 89) {
+ if (inst > NR_ICMPMSG_COUNTERS)
+ return PM_ERR_INST;
+ value = *((__uint64_t *)mdesc->m_user + inst);
+ if (value == (__uint64_t)-1)
+ return 0; /* no value for this instance */
+ atom->ull = value;
+ return 1;
+ }
+ if (*(__uint64_t *)mdesc->m_user == (__uint64_t)-1)
+ if (idp->item != 53) /* tcp.maxconn is special */
+ return 0; /* no value available on this kernel */
+ }
+ else
+ if (idp->cluster == CLUSTER_NET_NETSTAT) {
+ if (*(__uint64_t *)mdesc->m_user == (__uint64_t)-1)
+ return 0; /* no value available on this kernel */
+ }
+ else
+ if (idp->cluster == CLUSTER_NET_NFS) {
+ /*
+ * check if rpc stats are available
+ */
+ if (idp->item >= 20 && idp->item <= 27 && proc_net_rpc.client.errcode != 0)
+ /* no values available for client rpc/nfs - this is expected <= 2.0.36 */
+ return 0;
+ else
+ if (idp->item >= 30 && idp->item <= 47 && proc_net_rpc.server.errcode != 0)
+ /* no values available - expected without /proc/net/rpc/nfsd */
+ return 0; /* no values available */
+ if (idp->item >= 51 && idp->item <= 57 && proc_net_rpc.server.errcode != 0)
+ /* no values available - expected without /proc/net/rpc/nfsd */
+ return 0; /* no values available */
+ }
+ if (idp->cluster == CLUSTER_SYSFS_KERNEL) {
+ /* no values available for udev metrics */
+ if (idp->item == 0 && !sysfs_kernel.valid_uevent_seqnum) {
+ return 0;
+ }
+ }
+
+ switch (mdesc->m_desc.type) {
+ case PM_TYPE_32:
+ atom->l = *(__int32_t *)mdesc->m_user;
+ break;
+ case PM_TYPE_U32:
+ atom->ul = *(__uint32_t *)mdesc->m_user;
+ break;
+ case PM_TYPE_64:
+ atom->ll = *(__int64_t *)mdesc->m_user;
+ break;
+ case PM_TYPE_U64:
+ atom->ull = *(__uint64_t *)mdesc->m_user;
+ break;
+ case PM_TYPE_FLOAT:
+ atom->f = *(float *)mdesc->m_user;
+ break;
+ case PM_TYPE_DOUBLE:
+ atom->d = *(double *)mdesc->m_user;
+ break;
+ case PM_TYPE_STRING:
+ atom->cp = (char *)mdesc->m_user;
+ break;
+ default:
+ return 0;
+ }
+ }
+ else
+ switch (idp->cluster) {
+ case CLUSTER_STAT:
+ /*
+ * All metrics from /proc/stat
+ */
+ switch (idp->item) {
+ case 0: /* kernel.percpu.cpu.user */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_user[inst] / proc_stat.hz);
+ break;
+ case 1: /* kernel.percpu.cpu.nice */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_nice[inst] / proc_stat.hz);
+ break;
+ case 2: /* kernel.percpu.cpu.sys */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_sys[inst] / proc_stat.hz);
+ break;
+ case 3: /* kernel.percpu.cpu.idle */
+ _pm_assign_utype(_pm_idletime_size, atom,
+ 1000 * (double)proc_stat.p_idle[inst] / proc_stat.hz);
+ break;
+ case 30: /* kernel.percpu.cpu.wait.total */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_wait[inst] / proc_stat.hz);
+ break;
+ case 31: /* kernel.percpu.cpu.intr */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * ((double)proc_stat.p_irq[inst] +
+ (double)proc_stat.p_sirq[inst]) / proc_stat.hz);
+ break;
+ case 56: /* kernel.percpu.cpu.irq.soft */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_sirq[inst] / proc_stat.hz);
+ break;
+ case 57: /* kernel.percpu.cpu.irq.hard */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_irq[inst] / proc_stat.hz);
+ break;
+ case 58: /* kernel.percpu.cpu.steal */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_steal[inst] / proc_stat.hz);
+ break;
+ case 61: /* kernel.percpu.cpu.guest */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.p_guest[inst] / proc_stat.hz);
+ break;
+ case 76: /* kernel.percpu.cpu.vuser */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * ((double)proc_stat.p_user[inst] - (double)proc_stat.p_guest[inst])
+ / proc_stat.hz);
+ break;
+ case 62: /* kernel.pernode.cpu.user */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_user[inst] / proc_stat.hz);
+ break;
+ case 63: /* kernel.pernode.cpu.nice */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_nice[inst] / proc_stat.hz);
+ break;
+ case 64: /* kernel.pernode.cpu.sys */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_sys[inst] / proc_stat.hz);
+ break;
+ case 65: /* kernel.pernode.cpu.idle */
+ _pm_assign_utype(_pm_idletime_size, atom,
+ 1000 * (double)proc_stat.n_idle[inst] / proc_stat.hz);
+ break;
+ case 69: /* kernel.pernode.cpu.wait.total */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_wait[inst] / proc_stat.hz);
+ break;
+ case 66: /* kernel.pernode.cpu.intr */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * ((double)proc_stat.n_irq[inst] +
+ (double)proc_stat.n_sirq[inst]) / proc_stat.hz);
+ break;
+ case 70: /* kernel.pernode.cpu.irq.soft */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_sirq[inst] / proc_stat.hz);
+ break;
+ case 71: /* kernel.pernode.cpu.irq.hard */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_irq[inst] / proc_stat.hz);
+ break;
+ case 67: /* kernel.pernode.cpu.steal */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_steal[inst] / proc_stat.hz);
+ break;
+ case 68: /* kernel.pernode.cpu.guest */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.n_guest[inst] / proc_stat.hz);
+ break;
+ case 77: /* kernel.pernode.cpu.guest */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * ((double)proc_stat.n_user[inst] - (double)proc_stat.n_guest[inst])
+ / proc_stat.hz);
+ break;
+
+ case 8: /* pagesin */
+ if (_pm_have_proc_vmstat)
+ atom->ul = _pm_proc_vmstat.pswpin;
+ else
+ atom->ul = proc_stat.swap[0];
+ break;
+ case 9: /* pagesout */
+ if (_pm_have_proc_vmstat)
+ atom->ul = _pm_proc_vmstat.pswpout;
+ else
+ atom->ul = proc_stat.swap[1];
+ break;
+ case 10: /* in */
+ if (_pm_have_proc_vmstat)
+ return PM_ERR_APPVERSION; /* no swap operation counts in 2.6 */
+ else
+ atom->ul = proc_stat.page[0];
+ break;
+ case 11: /* out */
+ if (_pm_have_proc_vmstat)
+ return PM_ERR_APPVERSION; /* no swap operation counts in 2.6 */
+ else
+ atom->ul = proc_stat.page[1];
+ break;
+ case 12: /* intr */
+ _pm_assign_utype(_pm_intr_size, atom, proc_stat.intr);
+ break;
+ case 13: /* ctxt */
+ _pm_assign_utype(_pm_ctxt_size, atom, proc_stat.ctxt);
+ break;
+ case 14: /* processes */
+ _pm_assign_ulong(atom, proc_stat.processes);
+ break;
+
+ case 20: /* kernel.all.cpu.user */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.user / proc_stat.hz);
+ break;
+ case 21: /* kernel.all.cpu.nice */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.nice / proc_stat.hz);
+ break;
+ case 22: /* kernel.all.cpu.sys */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.sys / proc_stat.hz);
+ break;
+ case 23: /* kernel.all.cpu.idle */
+ _pm_assign_utype(_pm_idletime_size, atom,
+ 1000 * (double)proc_stat.idle / proc_stat.hz);
+ break;
+ case 34: /* kernel.all.cpu.intr */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * ((double)proc_stat.irq +
+ (double)proc_stat.sirq) / proc_stat.hz);
+ break;
+ case 35: /* kernel.all.cpu.wait.total */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.wait / proc_stat.hz);
+ break;
+ case 53: /* kernel.all.cpu.irq.soft */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.sirq / proc_stat.hz);
+ break;
+ case 54: /* kernel.all.cpu.irq.hard */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.irq / proc_stat.hz);
+ break;
+ case 55: /* kernel.all.cpu.steal */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.steal / proc_stat.hz);
+ break;
+ case 60: /* kernel.all.cpu.guest */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * (double)proc_stat.guest / proc_stat.hz);
+ break;
+ case 78: /* kernel.all.cpu.vuser */
+ _pm_assign_utype(_pm_cputime_size, atom,
+ 1000 * ((double)proc_stat.user - (double)proc_stat.guest)
+ / proc_stat.hz);
+ break;
+
+ case 19: /* hinv.nnode */
+ atom->ul = indomtab[NODE_INDOM].it_numinst;
+ break;
+ case 32: /* hinv.ncpu */
+ atom->ul = indomtab[CPU_INDOM].it_numinst;
+ break;
+ case 33: /* hinv.ndisk */
+ atom->ul = pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_SIZE_ACTIVE);
+ break;
+
+ case 48: /* kernel.all.hz */
+ atom->ul = proc_stat.hz;
+ break;
+
+ default:
+ /*
+ * Disk metrics used to be fetched from /proc/stat (2.2 kernels)
+ * but have since moved to /proc/partitions (2.4 kernels) and
+ * /proc/diskstats (2.6 kernels). We preserve the cluster number
+ * (middle bits of a PMID) for backward compatibility.
+ *
+ * Note that proc_partitions_fetch() will return PM_ERR_PMID
+ * if we have tried to fetch an unknown metric.
+ */
+ return proc_partitions_fetch(mdesc, inst, atom);
+ }
+ break;
+
+ case CLUSTER_UPTIME: /* uptime */
+ switch (idp->item) {
+ case 0:
+ /*
+ * kernel.all.uptime (in seconds)
+ * contributed by "gilly" <gilly@exanet.com>
+ * modified by Mike Mason" <mmlnx@us.ibm.com>
+ */
+ atom->ul = proc_uptime.uptime;
+ break;
+ case 1:
+ /*
+ * kernel.all.idletime (in seconds)
+ * contributed by "Mike Mason" <mmlnx@us.ibm.com>
+ */
+ atom->ul = proc_uptime.idletime;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_MEMINFO: /* mem */
+ switch (idp->item) {
+ case 0: /* mem.physmem (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.MemTotal >> 10;
+ break;
+ case 1: /* mem.util.used (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemTotal) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.MemFree))
+ return 0; /* no values available */
+ atom->ull = (proc_meminfo.MemTotal - proc_meminfo.MemFree) >> 10;
+ break;
+ case 2: /* mem.util.free (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.MemFree >> 10;
+ break;
+ case 3: /* mem.util.shared (in kbytes) */
+ /*
+ * If this metric is exported by the running kernel, it is always
+ * zero (deprecated). PCP exports it for compatibility with older
+ * PCP monitoring tools, e.g. pmgsys running on IRIX(TM).
+ */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemShared))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.MemShared >> 10;
+ break;
+ case 4: /* mem.util.bufmem (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Buffers))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Buffers >> 10;
+ break;
+ case 5: /* mem.util.cached (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Cached))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Cached >> 10;
+ break;
+ case 6: /* swap.length (in bytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SwapTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SwapTotal;
+ break;
+ case 7: /* swap.used (in bytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SwapTotal) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.SwapFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SwapTotal - proc_meminfo.SwapFree;
+ break;
+ case 8: /* swap.free (in bytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SwapFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SwapFree;
+ break;
+ case 9: /* hinv.physmem (in mbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemTotal))
+ return 0; /* no values available */
+ atom->ul = proc_meminfo.MemTotal >> 20;
+ break;
+ case 10: /* mem.freemem (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.MemFree >> 10;
+ break;
+ case 11: /* hinv.pagesize (in bytes) */
+ atom->ul = _pm_system_pagesize;
+ break;
+ case 12: /* mem.util.other (in kbytes) */
+ /* other = used - (cached+buffers) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemTotal) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.MemFree) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.Cached) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.Buffers))
+ return 0; /* no values available */
+ sl = (proc_meminfo.MemTotal -
+ proc_meminfo.MemFree -
+ proc_meminfo.Cached -
+ proc_meminfo.Buffers) >> 10;
+ atom->ull = sl >= 0 ? sl : 0;
+ break;
+ case 13: /* mem.util.swapCached (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SwapCached))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SwapCached >> 10;
+ break;
+ case 14: /* mem.util.active (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Active))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Active >> 10;
+ break;
+ case 15: /* mem.util.inactive (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Inactive))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Inactive >> 10;
+ break;
+ case 16: /* mem.util.highTotal (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HighTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HighTotal >> 10;
+ break;
+ case 17: /* mem.util.highFree (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HighFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HighFree >> 10;
+ break;
+ case 18: /* mem.util.lowTotal (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.LowTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.LowTotal >> 10;
+ break;
+ case 19: /* mem.util.lowFree (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.LowFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.LowFree >> 10;
+ break;
+ case 20: /* mem.util.swapTotal (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SwapTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SwapTotal >> 10;
+ break;
+ case 21: /* mem.util.swapFree (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SwapFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SwapFree >> 10;
+ break;
+ case 22: /* mem.util.dirty (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Dirty))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Dirty >> 10;
+ break;
+ case 23: /* mem.util.writeback (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Writeback))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Writeback >> 10;
+ break;
+ case 24: /* mem.util.mapped (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Mapped))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Mapped >> 10;
+ break;
+ case 25: /* mem.util.slab (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Slab))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Slab >> 10;
+ break;
+ case 26: /* mem.util.committed_AS (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Committed_AS))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Committed_AS >> 10;
+ break;
+ case 27: /* mem.util.pageTables (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.PageTables))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.PageTables >> 10;
+ break;
+ case 28: /* mem.util.reverseMaps (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.ReverseMaps))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.ReverseMaps >> 10;
+ break;
+ case 29: /* mem.util.clean_cache (in kbytes) */
+ /* clean=cached-(dirty+writeback) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Cached) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.Dirty) ||
+ !MEMINFO_VALID_VALUE(proc_meminfo.Writeback))
+ return 0; /* no values available */
+ sl = (proc_meminfo.Cached -
+ proc_meminfo.Dirty -
+ proc_meminfo.Writeback) >> 10;
+ atom->ull = sl >= 0 ? sl : 0;
+ break;
+ case 30: /* mem.util.anonpages */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.AnonPages))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.AnonPages >> 10;
+ break;
+ case 31: /* mem.util.commitLimit (in kbytes) */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.CommitLimit))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.CommitLimit >> 10;
+ break;
+ case 32: /* mem.util.bounce */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Bounce))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Bounce >> 10;
+ break;
+ case 33: /* mem.util.NFS_Unstable */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.NFS_Unstable))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.NFS_Unstable >> 10;
+ break;
+ case 34: /* mem.util.slabReclaimable */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SlabReclaimable))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SlabReclaimable >> 10;
+ break;
+ case 35: /* mem.util.slabUnreclaimable */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.SlabUnreclaimable))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.SlabUnreclaimable >> 10;
+ break;
+ case 36: /* mem.util.active_anon */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Active_anon))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Active_anon >> 10;
+ break;
+ case 37: /* mem.util.inactive_anon */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Inactive_anon))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Inactive_anon >> 10;
+ break;
+ case 38: /* mem.util.active_file */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Active_file))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Active_file >> 10;
+ break;
+ case 39: /* mem.util.inactive_file */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Inactive_file))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Inactive_file >> 10;
+ break;
+ case 40: /* mem.util.unevictable */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Unevictable))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Unevictable >> 10;
+ break;
+ case 41: /* mem.util.mlocked */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Mlocked))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Mlocked >> 10;
+ break;
+ case 42: /* mem.util.shmem */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Shmem))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Shmem >> 10;
+ break;
+ case 43: /* mem.util.kernelStack */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.KernelStack))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.KernelStack >> 10;
+ break;
+ case 44: /* mem.util.hugepagesTotal */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HugepagesTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HugepagesTotal;
+ break;
+ case 45: /* mem.util.hugepagesFree */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HugepagesFree))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HugepagesFree;
+ break;
+ case 46: /* mem.util.hugepagesRsvd */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HugepagesRsvd))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HugepagesRsvd;
+ break;
+ case 47: /* mem.util.hugepagesSurp */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HugepagesSurp))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HugepagesSurp;
+ break;
+ case 48: /* mem.util.directMap4k */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.directMap4k))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.directMap4k >> 10;
+ break;
+ case 49: /* mem.util.directMap2M */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.directMap2M))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.directMap2M >> 10;
+ break;
+ case 50: /* mem.util.vmallocTotal */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.VmallocTotal))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.VmallocTotal >> 10;
+ break;
+ case 51: /* mem.util.vmallocUsed */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.VmallocUsed))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.VmallocUsed >> 10;
+ break;
+ case 52: /* mem.util.vmallocChunk */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.VmallocChunk))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.VmallocChunk >> 10;
+ break;
+ case 53: /* mem.util.mmap_copy */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MmapCopy))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.MmapCopy >> 10;
+ break;
+ case 54: /* mem.util.quicklists */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.Quicklists))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.Quicklists >> 10;
+ break;
+ case 55: /* mem.util.corrupthardware */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.HardwareCorrupted))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.HardwareCorrupted >> 10;
+ break;
+ case 56: /* mem.util.anonhugepages */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.AnonHugePages))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.AnonHugePages >> 10;
+ break;
+ case 57: /* mem.util.directMap1G */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.directMap1G))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.directMap1G >> 10;
+ break;
+ case 58: /* mem.util.available */
+ if (!MEMINFO_VALID_VALUE(proc_meminfo.MemAvailable))
+ return 0; /* no values available */
+ atom->ull = proc_meminfo.MemAvailable >> 10;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_LOADAVG:
+ switch(idp->item) {
+ case 0: /* kernel.all.load */
+ if (inst == 1)
+ atom->f = proc_loadavg.loadavg[0];
+ else
+ if (inst == 5)
+ atom->f = proc_loadavg.loadavg[1];
+ else
+ if (inst == 15)
+ atom->f = proc_loadavg.loadavg[2];
+ else
+ return PM_ERR_INST;
+ break;
+ case 1: /* kernel.all.lastpid -- added by "Mike Mason" <mmlnx@us.ibm.com> */
+ atom->ul = proc_loadavg.lastpid;
+ break;
+ case 2: /* kernel.all.runnable */
+ atom->ul = proc_loadavg.runnable;
+ break;
+ case 3: /* kernel.all.nprocs */
+ atom->ul = proc_loadavg.nprocs;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_NET_DEV: /* network.interface */
+ if (idp->item == 27) { /* hinv.ninterface */
+ atom->ul = pmdaCacheOp(INDOM(NET_DEV_INDOM), PMDA_CACHE_SIZE_ACTIVE);
+ break;
+ }
+ sts = pmdaCacheLookup(INDOM(NET_DEV_INDOM), inst, NULL, (void **)&netip);
+ if (sts < 0)
+ return sts;
+ if (idp->item <= 15) {
+ /* network.interface.{in,out} */
+ atom->ull = netip->counters[idp->item];
+ }
+ else
+ switch (idp->item) {
+ case 16: /* network.interface.total.bytes */
+ atom->ull = netip->counters[0] + netip->counters[8];
+ break;
+ case 17: /* network.interface.total.packets */
+ atom->ull = netip->counters[1] + netip->counters[9];
+ break;
+ case 18: /* network.interface.total.errors */
+ atom->ull = netip->counters[2] + netip->counters[10];
+ break;
+ case 19: /* network.interface.total.drops */
+ atom->ull = netip->counters[3] + netip->counters[11];
+ break;
+ case 20: /* network.interface.total.mcasts */
+ /*
+ * NOTE: there is no network.interface.out.mcasts metric
+ * so this total only includes network.interface.in.mcasts
+ */
+ atom->ull = netip->counters[7];
+ break;
+ case 21: /* network.interface.mtu */
+ if (!netip->ioc.mtu)
+ return 0;
+ atom->ul = netip->ioc.mtu;
+ break;
+ case 22: /* network.interface.speed */
+ if (!netip->ioc.speed)
+ return 0;
+ atom->f = ((float)netip->ioc.speed * 1000000) / 8 / 1024 / 1024;
+ break;
+ case 23: /* network.interface.baudrate */
+ if (!netip->ioc.speed)
+ return 0;
+ atom->ul = ((long long)netip->ioc.speed * 1000000 / 8);
+ break;
+ case 24: /* network.interface.duplex */
+ if (!netip->ioc.duplex)
+ return 0;
+ atom->ul = netip->ioc.duplex;
+ break;
+ case 25: /* network.interface.up */
+ atom->ul = netip->ioc.linkup;
+ break;
+ case 26: /* network.interface.running */
+ atom->ul = netip->ioc.running;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_NET_ADDR:
+ sts = pmdaCacheLookup(INDOM(NET_ADDR_INDOM), inst, NULL, (void **)&addrp);
+ if (sts < 0)
+ return sts;
+ if (sts != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+ switch (idp->item) {
+ case 0: /* network.interface.inet_addr */
+ if (addrp->has_inet == 0)
+ return 0;
+ atom->cp = addrp->inet;
+ break;
+ case 1: /* network.interface.ipv6_addr */
+ if (addrp->has_ipv6 == 0)
+ return 0;
+ atom->cp = addrp->ipv6;
+ break;
+ case 2: /* network.interface.ipv6_scope */
+ if (addrp->has_ipv6 == 0)
+ return 0;
+ atom->cp = lookup_ipv6_scope(addrp->ipv6scope);
+ break;
+ case 3: /* network.interface.hw_addr */
+ if (addrp->has_hw == 0)
+ return 0;
+ atom->cp = addrp->hw_addr;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_FILESYS:
+ if (idp->item == 0)
+ atom->ul = pmdaCacheOp(INDOM(FILESYS_INDOM), PMDA_CACHE_SIZE_ACTIVE);
+ else {
+ struct statfs *sbuf;
+ __uint64_t ull, used;
+
+ sts = pmdaCacheLookup(INDOM(FILESYS_INDOM), inst, NULL, (void **)&fs);
+ if (sts < 0)
+ return sts;
+ if (sts != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ sbuf = &fs->stats;
+ if (!(fs->flags & FSF_FETCHED)) {
+ if (statfs(fs->path, sbuf) < 0)
+ return PM_ERR_INST;
+ fs->flags |= FSF_FETCHED;
+ }
+
+ switch (idp->item) {
+ case 1: /* filesys.capacity */
+ ull = (__uint64_t)sbuf->f_blocks;
+ atom->ull = ull * sbuf->f_bsize / 1024;
+ break;
+ case 2: /* filesys.used */
+ used = (__uint64_t)(sbuf->f_blocks - sbuf->f_bfree);
+ atom->ull = used * sbuf->f_bsize / 1024;
+ break;
+ case 3: /* filesys.free */
+ ull = (__uint64_t)sbuf->f_bfree;
+ atom->ull = ull * sbuf->f_bsize / 1024;
+ break;
+ case 4: /* filesys.maxfiles */
+ atom->ul = sbuf->f_files;
+ break;
+ case 5: /* filesys.usedfiles */
+ atom->ul = sbuf->f_files - sbuf->f_ffree;
+ break;
+ case 6: /* filesys.freefiles */
+ atom->ul = sbuf->f_ffree;
+ break;
+ case 7: /* filesys.mountdir */
+ atom->cp = fs->path;
+ break;
+ case 8: /* filesys.full */
+ used = (__uint64_t)(sbuf->f_blocks - sbuf->f_bfree);
+ ull = used + (__uint64_t)sbuf->f_bavail;
+ atom->d = (100.0 * (double)used) / (double)ull;
+ break;
+ case 9: /* filesys.blocksize -- added by Mike Mason <mmlnx@us.ibm.com> */
+ atom->ul = sbuf->f_bsize;
+ break;
+ case 10: /* filesys.avail -- added by Mike Mason <mmlnx@us.ibm.com> */
+ ull = (__uint64_t)sbuf->f_bavail;
+ atom->ull = ull * sbuf->f_bsize / 1024;
+ break;
+ case 11: /* filesys.readonly */
+ atom->ul = (scan_filesys_options(fs->options, "ro") != NULL);
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ }
+ break;
+
+ case CLUSTER_TMPFS: {
+ struct statfs *sbuf;
+ __uint64_t ull, used;
+
+ sts = pmdaCacheLookup(INDOM(TMPFS_INDOM), inst, NULL, (void **)&fs);
+ if (sts < 0)
+ return sts;
+ if (sts != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ sbuf = &fs->stats;
+ if (!(fs->flags & FSF_FETCHED)) {
+ if (statfs(fs->path, sbuf) < 0)
+ return PM_ERR_INST;
+ fs->flags |= FSF_FETCHED;
+ }
+
+ switch (idp->item) {
+ case 1: /* tmpfs.capacity */
+ ull = (__uint64_t)sbuf->f_blocks;
+ atom->ull = ull * sbuf->f_bsize / 1024;
+ break;
+ case 2: /* tmpfs.used */
+ used = (__uint64_t)(sbuf->f_blocks - sbuf->f_bfree);
+ atom->ull = used * sbuf->f_bsize / 1024;
+ break;
+ case 3: /* tmpfs.free */
+ ull = (__uint64_t)sbuf->f_bfree;
+ atom->ull = ull * sbuf->f_bsize / 1024;
+ break;
+ case 4: /* tmpfs.maxfiles */
+ atom->ul = sbuf->f_files;
+ break;
+ case 5: /* tmpfs.usedfiles */
+ atom->ul = sbuf->f_files - sbuf->f_ffree;
+ break;
+ case 6: /* tmpfs.freefiles */
+ atom->ul = sbuf->f_ffree;
+ break;
+ case 7: /* tmpfs.full */
+ used = (__uint64_t)(sbuf->f_blocks - sbuf->f_bfree);
+ ull = used + (__uint64_t)sbuf->f_bavail;
+ atom->d = (100.0 * (double)used) / (double)ull;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ }
+ break;
+
+ case CLUSTER_SWAPDEV: {
+ struct swapdev *swap;
+
+ sts = pmdaCacheLookup(INDOM(SWAPDEV_INDOM), inst, NULL, (void **)&swap);
+ if (sts < 0)
+ return sts;
+ if (sts != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ switch (idp->item) {
+ case 0: /* swapdev.free (kbytes) */
+ atom->ul = swap->size - swap->used;
+ break;
+ case 1: /* swapdev.length (kbytes) */
+ case 2: /* swapdev.maxswap (kbytes) */
+ atom->ul = swap->size;
+ break;
+ case 3: /* swapdev.vlength (kbytes) */
+ atom->ul = 0;
+ break;
+ case 4: /* swapdev.priority */
+ atom->l = swap->priority;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+ }
+
+ case CLUSTER_NET_NFS:
+ switch (idp->item) {
+ case 1: /* nfs.client.calls */
+ if (proc_net_rpc.client.errcode != 0)
+ return 0; /* no values available */
+ for (atom->ul=0, i=0; i < NR_RPC_COUNTERS; i++) {
+ atom->ul += proc_net_rpc.client.reqcounts[i];
+ }
+ break;
+ case 50: /* nfs.server.calls */
+ if (proc_net_rpc.server.errcode != 0)
+ return 0; /* no values available */
+ for (atom->ul=0, i=0; i < NR_RPC_COUNTERS; i++) {
+ atom->ul += proc_net_rpc.server.reqcounts[i];
+ }
+ break;
+ case 4: /* nfs.client.reqs */
+ if (proc_net_rpc.client.errcode != 0)
+ return 0; /* no values available */
+ if (inst < NR_RPC_COUNTERS)
+ atom->ul = proc_net_rpc.client.reqcounts[inst];
+ else
+ return PM_ERR_INST;
+ break;
+
+ case 12: /* nfs.server.reqs */
+ if (proc_net_rpc.server.errcode != 0)
+ return 0; /* no values available */
+ if (inst < NR_RPC_COUNTERS)
+ atom->ul = proc_net_rpc.server.reqcounts[inst];
+ else
+ return PM_ERR_INST;
+ break;
+
+ case 60: /* nfs3.client.calls */
+ if (proc_net_rpc.client.errcode != 0)
+ return 0; /* no values available */
+ for (atom->ul=0, i=0; i < NR_RPC3_COUNTERS; i++) {
+ atom->ul += proc_net_rpc.client.reqcounts3[i];
+ }
+ break;
+
+ case 62: /* nfs3.server.calls */
+ if (proc_net_rpc.server.errcode != 0)
+ return 0; /* no values available */
+ for (atom->ul=0, i=0; i < NR_RPC3_COUNTERS; i++) {
+ atom->ul += proc_net_rpc.server.reqcounts3[i];
+ }
+ break;
+
+ case 61: /* nfs3.client.reqs */
+ if (proc_net_rpc.client.errcode != 0)
+ return 0; /* no values available */
+ if (inst < NR_RPC3_COUNTERS)
+ atom->ul = proc_net_rpc.client.reqcounts3[inst];
+ else
+ return PM_ERR_INST;
+ break;
+
+ case 63: /* nfs3.server.reqs */
+ if (proc_net_rpc.server.errcode != 0)
+ return 0; /* no values available */
+ if (inst < NR_RPC3_COUNTERS)
+ atom->ul = proc_net_rpc.server.reqcounts3[inst];
+ else
+ return PM_ERR_INST;
+ break;
+
+ case 64: /* nfs4.client.calls */
+ if (proc_net_rpc.client.errcode != 0)
+ return 0; /* no values available */
+ for (atom->ul=0, i=0; i < NR_RPC4_CLI_COUNTERS; i++) {
+ atom->ul += proc_net_rpc.client.reqcounts4[i];
+ }
+ break;
+
+ case 66: /* nfs4.server.calls */
+ if (proc_net_rpc.server.errcode != 0)
+ return 0; /* no values available */
+ for (atom->ul=0, i=0; i < NR_RPC4_SVR_COUNTERS; i++) {
+ atom->ul += proc_net_rpc.server.reqcounts4[i];
+ }
+ break;
+
+ case 65: /* nfs4.client.reqs */
+ if (proc_net_rpc.client.errcode != 0)
+ return 0; /* no values available */
+ if (inst < NR_RPC4_CLI_COUNTERS)
+ atom->ul = proc_net_rpc.client.reqcounts4[inst];
+ else
+ return PM_ERR_INST;
+ break;
+
+ case 67: /* nfs4.server.reqs */
+ if (proc_net_rpc.server.errcode != 0)
+ return 0; /* no values available */
+ if (inst < NR_RPC4_SVR_COUNTERS)
+ atom->ul = proc_net_rpc.server.reqcounts4[inst];
+ else
+ return PM_ERR_INST;
+ break;
+
+ /*
+ * Note: all other rpc metric values are extracted directly via the
+ * address specified in the metrictab (see above)
+ */
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_SLAB:
+ if (proc_slabinfo.ncaches == 0)
+ return 0; /* no values available */
+
+ if (inst >= proc_slabinfo.ncaches)
+ return PM_ERR_INST;
+
+ switch(idp->item) {
+ case 0: /* mem.slabinfo.objects.active */
+ atom->ull = proc_slabinfo.caches[inst].num_active_objs;
+ break;
+ case 1: /* mem.slabinfo.objects.total */
+ atom->ull = proc_slabinfo.caches[inst].total_objs;
+ break;
+ case 2: /* mem.slabinfo.objects.size */
+ if (proc_slabinfo.caches[inst].seen < 11) /* version 1.1 or later only */
+ return 0;
+ atom->ul = proc_slabinfo.caches[inst].object_size;
+ break;
+ case 3: /* mem.slabinfo.slabs.active */
+ if (proc_slabinfo.caches[inst].seen < 11) /* version 1.1 or later only */
+ return 0;
+ atom->ul = proc_slabinfo.caches[inst].num_active_slabs;
+ break;
+ case 4: /* mem.slabinfo.slabs.total */
+ if (proc_slabinfo.caches[inst].seen == 11) /* version 1.1 only */
+ return 0;
+ atom->ul = proc_slabinfo.caches[inst].total_slabs;
+ break;
+ case 5: /* mem.slabinfo.slabs.pages_per_slab */
+ if (proc_slabinfo.caches[inst].seen < 11) /* version 1.1 or later only */
+ return 0;
+ atom->ul = proc_slabinfo.caches[inst].pages_per_slab;
+ break;
+ case 6: /* mem.slabinfo.slabs.objects_per_slab */
+ if (proc_slabinfo.caches[inst].seen != 20) /* version 2.0 only */
+ return 0;
+ atom->ul = proc_slabinfo.caches[inst].objects_per_slab;
+ break;
+ case 7: /* mem.slabinfo.slabs.total_size */
+ if (proc_slabinfo.caches[inst].seen < 11) /* version 1.1 or later only */
+ return 0;
+ atom->ull = proc_slabinfo.caches[inst].total_size;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_PARTITIONS:
+ return proc_partitions_fetch(mdesc, inst, atom);
+
+ case CLUSTER_SCSI:
+ if (proc_scsi.nscsi == 0)
+ return 0; /* no values available */
+ switch(idp->item) {
+ case 0: /* hinv.map.scsi */
+ atom->cp = (char *)NULL;
+ for (i=0; i < proc_scsi.nscsi; i++) {
+ if (proc_scsi.scsi[i].id == inst) {
+ atom->cp = proc_scsi.scsi[i].dev_name;
+ break;
+ }
+ }
+ if (i == proc_scsi.nscsi)
+ return PM_ERR_INST;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_LV:
+ switch(idp->item) {
+ case 0: /* hinv.map.lvname */
+ if (dev_mapper.nlv == 0)
+ return 0; /* no values available */
+ atom->cp = (char *)NULL;
+ for (i = 0; i < dev_mapper.nlv; i++) {
+ if (dev_mapper.lv[i].id == inst) {
+ atom->cp = dev_mapper.lv[i].dev_name;
+ break;
+ }
+ }
+ if (i == dev_mapper.nlv)
+ return PM_ERR_INST;
+ break;
+ case 1: /* hinv.nlv */
+ atom->ul = dev_mapper.nlv;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_KERNEL_UNAME:
+ switch(idp->item) {
+ case 5: /* pmda.uname */
+ sprintf(uname_string, "%s %s %s %s %s",
+ kernel_uname.sysname,
+ kernel_uname.nodename,
+ kernel_uname.release,
+ kernel_uname.version,
+ kernel_uname.machine);
+ atom->cp = uname_string;
+ break;
+
+ case 6: /* pmda.version */
+ atom->cp = pmGetConfig("PCP_VERSION");
+ break;
+
+ case 7: /* kernel.uname.distro ... not from uname(2) */
+ atom->cp = get_distro_info();
+ break;
+
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_CPUINFO:
+ if (idp->item != 7 && /* hinv.machine is singular */
+ (inst >= proc_cpuinfo.cpuindom->it_numinst))
+ return PM_ERR_INST;
+ switch(idp->item) {
+ case 0: /* hinv.cpu.clock */
+ if (proc_cpuinfo.cpuinfo[inst].clock == 0.0)
+ return 0;
+ atom->f = proc_cpuinfo.cpuinfo[inst].clock;
+ break;
+ case 1: /* hinv.cpu.vendor */
+ i = proc_cpuinfo.cpuinfo[inst].vendor;
+ atom->cp = linux_strings_lookup(i);
+ if (atom->cp == NULL)
+ atom->cp = "unknown";
+ break;
+ case 2: /* hinv.cpu.model */
+ if ((i = proc_cpuinfo.cpuinfo[inst].model) < 0)
+ i = proc_cpuinfo.cpuinfo[inst].model_name;
+ atom->cp = linux_strings_lookup(i);
+ if (atom->cp == NULL)
+ atom->cp = "unknown";
+ break;
+ case 3: /* hinv.cpu.stepping */
+ i = proc_cpuinfo.cpuinfo[inst].stepping;
+ atom->cp = linux_strings_lookup(i);
+ if (atom->cp == NULL)
+ atom->cp = "unknown";
+ break;
+ case 4: /* hinv.cpu.cache */
+ if (!proc_cpuinfo.cpuinfo[inst].cache)
+ return 0;
+ atom->ul = proc_cpuinfo.cpuinfo[inst].cache;
+ break;
+ case 5: /* hinv.cpu.bogomips */
+ if (proc_cpuinfo.cpuinfo[inst].bogomips == 0.0)
+ return 0;
+ atom->f = proc_cpuinfo.cpuinfo[inst].bogomips;
+ break;
+ case 6: /* hinv.map.cpu_num */
+ atom->ul = proc_cpuinfo.cpuinfo[inst].cpu_num;
+ break;
+ case 7: /* hinv.machine */
+ atom->cp = proc_cpuinfo.machine;
+ break;
+ case 8: /* hinv.map.cpu_node */
+ atom->ul = proc_cpuinfo.cpuinfo[inst].node;
+ break;
+ case 9: /* hinv.cpu.model_name */
+ if ((i = proc_cpuinfo.cpuinfo[inst].model_name) < 0)
+ i = proc_cpuinfo.cpuinfo[inst].model;
+ atom->cp = linux_strings_lookup(i);
+ if (atom->cp == NULL)
+ atom->cp = "unknown";
+ break;
+ case 10: /* hinv.cpu.flags */
+ i = proc_cpuinfo.cpuinfo[inst].flags;
+ atom->cp = linux_strings_lookup(i);
+ if (atom->cp == NULL)
+ atom->cp = "unknown";
+ break;
+ case 11: /* hinv.cpu.cache_alignment */
+ if (!proc_cpuinfo.cpuinfo[inst].cache_align)
+ return 0;
+ atom->ul = proc_cpuinfo.cpuinfo[inst].cache_align;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ /*
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+ case CLUSTER_SEM_LIMITS:
+ switch (idp->item) {
+ case 0: /* ipc.sem.max_semmap */
+ atom->ul = sem_limits.semmap;
+ break;
+ case 1: /* ipc.sem.max_semid */
+ atom->ul = sem_limits.semmni;
+ break;
+ case 2: /* ipc.sem.max_sem */
+ atom->ul = sem_limits.semmns;
+ break;
+ case 3: /* ipc.sem.num_undo */
+ atom->ul = sem_limits.semmnu;
+ break;
+ case 4: /* ipc.sem.max_perid */
+ atom->ul = sem_limits.semmsl;
+ break;
+ case 5: /* ipc.sem.max_ops */
+ atom->ul = sem_limits.semopm;
+ break;
+ case 6: /* ipc.sem.max_undoent */
+ atom->ul = sem_limits.semume;
+ break;
+ case 7: /* ipc.sem.sz_semundo */
+ atom->ul = sem_limits.semusz;
+ break;
+ case 8: /* ipc.sem.max_semval */
+ atom->ul = sem_limits.semvmx;
+ break;
+ case 9: /* ipc.sem.max_exit */
+ atom->ul = sem_limits.semaem;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ /*
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+ case CLUSTER_MSG_LIMITS:
+ switch (idp->item) {
+ case 0: /* ipc.msg.sz_pool */
+ atom->ul = msg_limits.msgpool;
+ break;
+ case 1: /* ipc.msg.mapent */
+ atom->ul = msg_limits.msgmap;
+ break;
+ case 2: /* ipc.msg.max_msgsz */
+ atom->ul = msg_limits.msgmax;
+ break;
+ case 3: /* ipc.msg.max_defmsgq */
+ atom->ul = msg_limits.msgmnb;
+ break;
+ case 4: /* ipc.msg.max_msgqid */
+ atom->ul = msg_limits.msgmni;
+ break;
+ case 5: /* ipc.msg.sz_msgseg */
+ atom->ul = msg_limits.msgssz;
+ break;
+ case 6: /* ipc.msg.num_smsghdr */
+ atom->ul = msg_limits.msgtql;
+ break;
+ case 7: /* ipc.msg.max_seg */
+ atom->ul = (unsigned long) msg_limits.msgseg;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ /*
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+ case CLUSTER_SHM_LIMITS:
+ switch (idp->item) {
+ case 0: /* ipc.shm.max_segsz */
+ atom->ul = shm_limits.shmmax;
+ break;
+ case 1: /* ipc.shm.min_segsz */
+ atom->ul = shm_limits.shmmin;
+ break;
+ case 2: /* ipc.shm.max_seg */
+ atom->ul = shm_limits.shmmni;
+ break;
+ case 3: /* ipc.shm.max_segproc */
+ atom->ul = shm_limits.shmseg;
+ break;
+ case 4: /* ipc.shm.max_shmsys */
+ atom->ul = shm_limits.shmall;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ /*
+ * Cluster added by Mike Mason <mmlnx@us.ibm.com>
+ */
+ case CLUSTER_NUSERS:
+ {
+ /* count the number of users */
+ struct utmp *ut;
+ atom->ul = 0;
+ setutent();
+ while ((ut = getutent())) {
+ if ((ut->ut_type == USER_PROCESS) && (ut->ut_name[0] != '\0'))
+ atom->ul++;
+ }
+ endutent();
+ }
+ break;
+
+
+ case CLUSTER_IB: /* deprecated: network.ib, use infiniband PMDA */
+ return PM_ERR_APPVERSION;
+
+ case CLUSTER_NUMA_MEMINFO:
+ /* NUMA memory metrics from /sys/devices/system/node/nodeX */
+ if (inst >= numa_meminfo.node_indom->it_numinst)
+ return PM_ERR_INST;
+
+ switch(idp->item) {
+ case 0: /* mem.numa.util.total */
+ sts = linux_table_lookup("MemTotal:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+ case 1: /* mem.numa.util.free */
+ sts = linux_table_lookup("MemFree:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 2: /* mem.numa.util.used */
+ sts = linux_table_lookup("MemUsed:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 3: /* mem.numa.util.active */
+ sts = linux_table_lookup("Active:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 4: /* mem.numa.util.inactive */
+ sts = linux_table_lookup("Inactive:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 5: /* mem.numa.util.active_anon */
+ sts = linux_table_lookup("Active(anon):", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 6: /* mem.numa.util.inactive_anon */
+ sts = linux_table_lookup("Inactive(anon):", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 7: /* mem.numa.util.active_file */
+ sts = linux_table_lookup("Active(file):", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 8: /* mem.numa.util.inactive_file */
+ sts = linux_table_lookup("Inactive(file):", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 9: /* mem.numa.util.highTotal */
+ sts = linux_table_lookup("HighTotal:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 10: /* mem.numa.util.highFree */
+ sts = linux_table_lookup("HighFree:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 11: /* mem.numa.util.lowTotal */
+ sts = linux_table_lookup("LowTotal:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 12: /* mem.numa.util.lowFree */
+ sts = linux_table_lookup("LowFree:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 13: /* mem.numa.util.unevictable */
+ sts = linux_table_lookup("Unevictable:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 14: /* mem.numa.util.mlocked */
+ sts = linux_table_lookup("Mlocked:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 15: /* mem.numa.util.dirty */
+ sts = linux_table_lookup("Dirty:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 16: /* mem.numa.util.writeback */
+ sts = linux_table_lookup("Writeback:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 17: /* mem.numa.util.filePages */
+ sts = linux_table_lookup("FilePages:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 18: /* mem.numa.util.mapped */
+ sts = linux_table_lookup("Mapped:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 19: /* mem.numa.util.anonPages */
+ sts = linux_table_lookup("AnonPages:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 20: /* mem.numa.util.shmem */
+ sts = linux_table_lookup("Shmem:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 21: /* mem.numa.util.kernelStack */
+ sts = linux_table_lookup("KernelStack:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 22: /* mem.numa.util.pageTables */
+ sts = linux_table_lookup("PageTables:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 23: /* mem.numa.util.NFS_Unstable */
+ sts = linux_table_lookup("NFS_Unstable:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 24: /* mem.numa.util.bounce */
+ sts = linux_table_lookup("Bounce:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 25: /* mem.numa.util.writebackTmp */
+ sts = linux_table_lookup("WritebackTmp:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 26: /* mem.numa.util.slab */
+ sts = linux_table_lookup("Slab:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 27: /* mem.numa.util.slabReclaimable */
+ sts = linux_table_lookup("SReclaimable:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 28: /* mem.numa.util.slabUnreclaimable */
+ sts = linux_table_lookup("SUnreclaim:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 29: /* mem.numa.util.hugepagesTotal */
+ sts = linux_table_lookup("HugePages_Total:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 30: /* mem.numa.util.hugepagesFree */
+ sts = linux_table_lookup("HugePages_Free:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 31: /* mem.numa.util.hugepagesSurp */
+ sts = linux_table_lookup("HugePages_Surp:", numa_meminfo.node_info[inst].meminfo,
+ &atom->ull);
+ break;
+
+ case 32: /* mem.numa.alloc.hit */
+ sts = linux_table_lookup("numa_hit", numa_meminfo.node_info[inst].memstat,
+ &atom->ull);
+ break;
+
+ case 33: /* mem.numa.alloc.miss */
+ sts = linux_table_lookup("numa_miss", numa_meminfo.node_info[inst].memstat,
+ &atom->ull);
+ break;
+
+ case 34: /* mem.numa.alloc.foreign */
+ sts = linux_table_lookup("numa_foreign", numa_meminfo.node_info[inst].memstat,
+ &atom->ull);
+ break;
+
+ case 35: /* mem.numa.alloc.interleave_hit */
+ sts = linux_table_lookup("interleave_hit", numa_meminfo.node_info[inst].memstat,
+ &atom->ull);
+ break;
+
+ case 36: /* mem.numa.alloc.local_node */
+ sts = linux_table_lookup("local_node", numa_meminfo.node_info[inst].memstat,
+ &atom->ull);
+ break;
+
+ case 37: /* mem.numa.alloc.other_node */
+ sts = linux_table_lookup("other_node", numa_meminfo.node_info[inst].memstat,
+ &atom->ull);
+ break;
+
+ default:
+ return PM_ERR_PMID;
+ }
+ return sts;
+
+ case CLUSTER_INTERRUPTS:
+ switch (idp->item) {
+ case 3: /* kernel.all.interrupts.error */
+ atom->ul = irq_err_count;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_INTERRUPT_LINES:
+ case CLUSTER_INTERRUPT_OTHER:
+ if (inst >= indomtab[CPU_INDOM].it_numinst)
+ return PM_ERR_INST;
+ return interrupts_fetch(idp->cluster, idp->item, inst, atom);
+
+ case CLUSTER_DM:
+ return proc_partitions_fetch(mdesc, inst, atom);
+ break;
+
+ default: /* unknown cluster */
+ return PM_ERR_PMID;
+ }
+
+ return 1;
+}
+
+
+/*
+ * Fetch method for the PMDA.
+ * Scans the requested PMID list, marks each referenced cluster for
+ * refresh (plus any clusters it implicitly depends on), refreshes them
+ * once via linux_refresh(), then lets pmdaFetch() drive the per-metric
+ * fetch callback.  Returns the pmdaFetch() status.
+ */
+static int
+linux_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
+{
+ int i;
+ int need_refresh[NUM_CLUSTERS];
+
+ memset(need_refresh, 0, sizeof(need_refresh));
+ for (i=0; i < numpmid; i++) {
+ __pmID_int *idp = (__pmID_int *)&(pmidlist[i]);
+ if (idp->cluster < NUM_CLUSTERS) {
+ need_refresh[idp->cluster]++;
+
+ /* some STAT/DM metrics are served from /proc/partitions data */
+ if ((idp->cluster == CLUSTER_STAT || idp->cluster == CLUSTER_DM) &&
+ need_refresh[CLUSTER_PARTITIONS] == 0 &&
+ is_partitions_metric(pmidlist[i]))
+ need_refresh[CLUSTER_PARTITIONS]++;
+
+ /* these clusters also rely on state gathered from /proc/stat */
+ if (idp->cluster == CLUSTER_CPUINFO ||
+ idp->cluster == CLUSTER_INTERRUPT_LINES ||
+ idp->cluster == CLUSTER_INTERRUPT_OTHER ||
+ idp->cluster == CLUSTER_INTERRUPTS)
+ need_refresh[CLUSTER_STAT]++;
+ }
+
+ /* In 2.6 kernels, swap.{pagesin,pagesout} are in /proc/vmstat */
+ if (_pm_have_proc_vmstat && idp->cluster == CLUSTER_STAT) {
+ if (idp->item >= 8 && idp->item <= 11)
+ need_refresh[CLUSTER_VMSTAT]++;
+ }
+ }
+
+ linux_refresh(pmda, need_refresh);
+ return pmdaFetch(numpmid, pmidlist, resp, pmda);
+}
+
+/*
+ * Help text method.
+ * Dynamically-created metrics (e.g. interrupt lines) keep their help
+ * text in the pmdaDynamic tables, so PMID lookups try there first;
+ * anything not found there (-ENOENT) falls back to the static help
+ * file via pmdaText().
+ */
+static int
+linux_text(int ident, int type, char **buf, pmdaExt *pmda)
+{
+ if ((type & PM_TEXT_PMID) == PM_TEXT_PMID) {
+ int sts = pmdaDynamicLookupText(ident, type, buf, pmda);
+ if (sts != -ENOENT)
+ return sts;
+ }
+ return pmdaText(ident, type, buf, pmda);
+}
+
+/*
+ * Name-to-PMID method: resolve via the dynamic namespace subtree
+ * (if any) that owns this metric name.
+ */
+static int
+linux_pmid(const char *name, pmID *pmid, pmdaExt *pmda)
+{
+ pmdaNameSpace *tree = pmdaDynamicLookupName(pmda, name);
+ return pmdaTreePMID(tree, name, pmid);
+}
+
+/*
+ * PMID-to-name(s) method: resolve via the dynamic namespace subtree
+ * (if any) that owns this PMID.
+ */
+static int
+linux_name(pmID pmid, char ***nameset, pmdaExt *pmda)
+{
+ pmdaNameSpace *tree = pmdaDynamicLookupPMID(pmda, pmid);
+ return pmdaTreeName(tree, pmid, nameset);
+}
+
+/*
+ * Namespace children method: enumerate immediate (or descendant,
+ * depending on 'flag') names below 'name' in the dynamic subtree.
+ */
+static int
+linux_children(const char *name, int flag, char ***kids, int **sts, pmdaExt *pmda)
+{
+ pmdaNameSpace *tree = pmdaDynamicLookupName(pmda, name);
+ return pmdaTreeChildren(tree, name, flag, kids, sts);
+}
+
+/* Map an instance domain serial number to its pmInDom identifier. */
+pmInDom
+linux_indom(int serial)
+{
+ return indomtab[serial].it_indom;
+}
+
+/* Map an instance domain serial number to its pmdaIndom table entry. */
+pmdaIndom *
+linux_pmda_indom(int serial)
+{
+ return &indomtab[serial];
+}
+
+/*
+ * Helper routines for accessing a generic static string dictionary
+ */
+
+/*
+ * Look up a previously-inserted string by its dictionary index
+ * (the value returned by linux_strings_insert).  Returns NULL if
+ * the index is not active in the cache.
+ */
+char *
+linux_strings_lookup(int index)
+{
+ char *value;
+ pmInDom dict = INDOM(STRINGS_INDOM);
+
+ if (pmdaCacheLookup(dict, index, &value, NULL) == PMDA_CACHE_ACTIVE)
+ return value;
+ return NULL;
+}
+
+/*
+ * Insert a string into the dictionary, returning its index
+ * (pmdaCacheStore result); duplicates map to the same index.
+ */
+int
+linux_strings_insert(const char *buf)
+{
+ pmInDom dict = INDOM(STRINGS_INDOM);
+ return pmdaCacheStore(dict, PMDA_CACHE_ADD, buf, NULL);
+}
+
+/*
+ * Initialise the agent (both daemon and DSO).
+ */
+
+/*
+ * Initialise the agent (both daemon and DSO modes).
+ * Order matters here: environment overrides, DSO/daemon setup,
+ * interface method wiring, indom cross-linking, kernel-version
+ * dependent metric type adjustment, then pmdaInit().
+ */
+void
+__PMDA_INIT_CALL
+linux_init(pmdaInterface *dp)
+{
+ int i, major, minor, point;
+ size_t nmetrics, nindoms;
+ char *envpath;
+ __pmID_int *idp;
+
+ _pm_system_pagesize = getpagesize();
+ /* allow QA/testing to redirect /proc and /sys reads elsewhere */
+ if ((envpath = getenv("LINUX_STATSPATH")) != NULL)
+ linux_statspath = envpath;
+
+ if (_isDSO) {
+ char helppath[MAXPATHLEN];
+ int sep = __pmPathSeparator();
+ snprintf(helppath, sizeof(helppath), "%s%c" "linux" "%c" "help",
+ pmGetConfig("PCP_PMDAS_DIR"), sep, sep);
+ pmdaDSO(dp, PMDA_INTERFACE_4, "linux DSO", helppath);
+ } else {
+ /* daemon mode: drop privileges to the configured user */
+ __pmSetProcessIdentity(username);
+ }
+
+ if (dp->status != 0)
+ return;
+
+ dp->version.four.instance = linux_instance;
+ dp->version.four.fetch = linux_fetch;
+ dp->version.four.text = linux_text;
+ dp->version.four.pmid = linux_pmid;
+ dp->version.four.name = linux_name;
+ dp->version.four.children = linux_children;
+ pmdaSetFetchCallBack(dp, linux_fetchCallBack);
+
+ /* share the indom table entries amongst the per-subsystem structs */
+ proc_stat.cpu_indom = proc_cpuinfo.cpuindom = &indomtab[CPU_INDOM];
+ numa_meminfo.node_indom = proc_cpuinfo.node_indom = &indomtab[NODE_INDOM];
+ proc_scsi.scsi_indom = &indomtab[SCSI_INDOM];
+ dev_mapper.lv_indom = &indomtab[LV_INDOM];
+ proc_slabinfo.indom = &indomtab[SLAB_INDOM];
+
+ /*
+ * Figure out kernel version. The precision of certain metrics
+ * (e.g. percpu time counters) has changed over kernel versions.
+ * See include/linux/kernel_stat.h for all the various flavours.
+ */
+ uname(&kernel_uname);
+ _pm_ctxt_size = 8;
+ _pm_intr_size = 8;
+ _pm_cputime_size = 8;
+ _pm_idletime_size = 8;
+ if (sscanf(kernel_uname.release, "%d.%d.%d", &major, &minor, &point) == 3) {
+ if (major < 2 || (major == 2 && minor <= 4)) { /* 2.4 and earlier */
+ _pm_ctxt_size = 4;
+ _pm_intr_size = 4;
+ _pm_cputime_size = 4;
+ _pm_idletime_size = sizeof(unsigned long);
+ }
+ else if (major == 2 && minor == 6 &&
+ point >= 0 && point <= 4) { /* 2.6.0->.4 */
+ _pm_cputime_size = 4;
+ _pm_idletime_size = 4;
+ }
+ }
+ /* patch the metric table types to match the detected counter widths */
+ for (i = 0; i < sizeof(metrictab)/sizeof(pmdaMetric); i++) {
+ idp = (__pmID_int *)&(metrictab[i].m_desc.pmid);
+ if (idp->cluster == CLUSTER_STAT) {
+ switch (idp->item) {
+ case 0: /* kernel.percpu.cpu.user */
+ case 1: /* kernel.percpu.cpu.nice */
+ case 2: /* kernel.percpu.cpu.sys */
+ case 20: /* kernel.all.cpu.user */
+ case 21: /* kernel.all.cpu.nice */
+ case 22: /* kernel.all.cpu.sys */
+ case 30: /* kernel.percpu.cpu.wait.total */
+ case 31: /* kernel.percpu.cpu.intr */
+ case 34: /* kernel.all.cpu.intr */
+ case 35: /* kernel.all.cpu.wait.total */
+ case 53: /* kernel.all.cpu.irq.soft */
+ case 54: /* kernel.all.cpu.irq.hard */
+ case 55: /* kernel.all.cpu.steal */
+ case 56: /* kernel.percpu.cpu.irq.soft */
+ case 57: /* kernel.percpu.cpu.irq.hard */
+ case 58: /* kernel.percpu.cpu.steal */
+ case 60: /* kernel.all.cpu.guest */
+ case 78: /* kernel.all.cpu.vuser */
+ case 61: /* kernel.percpu.cpu.guest */
+ case 76: /* kernel.percpu.cpu.vuser */
+ case 62: /* kernel.pernode.cpu.user */
+ case 63: /* kernel.pernode.cpu.nice */
+ case 64: /* kernel.pernode.cpu.sys */
+ case 69: /* kernel.pernode.cpu.wait.total */
+ case 66: /* kernel.pernode.cpu.intr */
+ case 70: /* kernel.pernode.cpu.irq.soft */
+ case 71: /* kernel.pernode.cpu.irq.hard */
+ case 67: /* kernel.pernode.cpu.steal */
+ case 68: /* kernel.pernode.cpu.guest */
+ case 77: /* kernel.pernode.cpu.vuser */
+ _pm_metric_type(metrictab[i].m_desc.type, _pm_cputime_size);
+ break;
+ case 3: /* kernel.percpu.cpu.idle */
+ case 23: /* kernel.all.cpu.idle */
+ case 65: /* kernel.pernode.cpu.idle */
+ _pm_metric_type(metrictab[i].m_desc.type, _pm_idletime_size);
+ break;
+ case 12: /* kernel.all.intr */
+ _pm_metric_type(metrictab[i].m_desc.type, _pm_intr_size);
+ break;
+ case 13: /* kernel.all.pswitch */
+ _pm_metric_type(metrictab[i].m_desc.type, _pm_ctxt_size);
+ break;
+ }
+ }
+ if (metrictab[i].m_desc.type == PM_TYPE_NOSUPPORT)
+ fprintf(stderr, "Bad kernel metric descriptor type (%u.%u)\n",
+ idp->cluster, idp->item);
+ }
+
+ nindoms = sizeof(indomtab)/sizeof(indomtab[0]);
+ nmetrics = sizeof(metrictab)/sizeof(metrictab[0]);
+
+ proc_vmstat_init();
+ interrupts_init(metrictab, nmetrics);
+
+ pmdaSetFlags(dp, PMDA_EXT_FLAG_HASHED);
+ pmdaInit(dp, indomtab, nindoms, metrictab, nmetrics);
+
+ /* string metrics use the pmdaCache API for value indexing */
+ pmdaCacheOp(INDOM(STRINGS_INDOM), PMDA_CACHE_STRINGS);
+}
+
+/* command line option tables, consumed by pmdaGetOptions() in main() */
+pmLongOptions longopts[] = {
+ PMDA_OPTIONS_HEADER("Options"),
+ PMOPT_DEBUG,
+ PMDAOPT_DOMAIN,
+ PMDAOPT_LOGFILE,
+ PMDAOPT_USERNAME,
+ PMOPT_HELP,
+ PMDA_OPTIONS_END
+};
+
+pmdaOptions opts = {
+ .short_options = "D:d:l:U:?",
+ .long_options = longopts,
+};
+
+/*
+ * Set up the agent if running as a daemon.
+ */
+/*
+ * Set up the agent when running as a daemon: parse options, open the
+ * log, initialise the PMDA, connect to pmcd and enter the main loop.
+ */
+int
+main(int argc, char **argv)
+{
+ int sep = __pmPathSeparator();
+ pmdaInterface dispatch;
+ char helppath[MAXPATHLEN];
+
+ _isDSO = 0;
+ __pmSetProgname(argv[0]);
+ __pmGetUsername(&username);
+
+ snprintf(helppath, sizeof(helppath), "%s%c" "linux" "%c" "help",
+ pmGetConfig("PCP_PMDAS_DIR"), sep, sep);
+ pmdaDaemon(&dispatch, PMDA_INTERFACE_4, pmProgname, LINUX, "linux.log", helppath);
+
+ pmdaGetOptions(argc, argv, &opts, &dispatch);
+ if (opts.errors) {
+ pmdaUsageMessage(&opts);
+ exit(1);
+ }
+ /* -U on the command line overrides the default PCP user */
+ if (opts.username)
+ username = opts.username;
+
+ pmdaOpenLog(&dispatch);
+ linux_init(&dispatch);
+ pmdaConnect(&dispatch);
+ pmdaMain(&dispatch);
+ exit(0);
+}
diff --git a/src/pmdas/linux/proc_cpuinfo.c b/src/pmdas/linux/proc_cpuinfo.c
new file mode 100644
index 0000000..f76ac27
--- /dev/null
+++ b/src/pmdas/linux/proc_cpuinfo.c
@@ -0,0 +1,246 @@
+/*
+ * Linux /proc/cpuinfo metrics cluster
+ *
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Portions Copyright (c) 2001 Gilly Ran (gilly@exanet.com) - for the
+ * portions supporting the Alpha platform. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <ctype.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_cpuinfo.h"
+
+
+/*
+ * Decode one 32-bit word of a node cpumap (hex string from
+ * /sys/devices/system/node/nodeN/cpumap) and record the owning NUMA
+ * node for every cpu whose bit is set.
+ *
+ * cp     - hex string for one comma-separated 32-bit word of the map
+ * node   - NUMA node number this map belongs to
+ * offset - which 32-bit word of the overall map this is (0 = cpus 0-31)
+ */
+static void
+decode_map(proc_cpuinfo_t *proc_cpuinfo, char *cp, int node, int offset)
+{
+    uint32_t map = strtoul(cp, NULL, 16);
+
+    while (map) {
+	int i;
+
+	if ((i = ffsl(map))) {
+	    /* the kernel returns 32bit words in the map file */
+	    int cpu = i - 1 + 32*offset;
+
+	    /* guard against maps describing more cpus than the indom knows */
+	    if (cpu < proc_cpuinfo->cpuindom->it_numinst)
+		proc_cpuinfo->cpuinfo[cpu].node = node;
+	    if (pmDebug & DBG_TRACE_APPL2) {
+		fprintf(stderr, "cpu %d -> node %d\n",
+			cpu, node);
+	    }
+	    /*
+	     * use an unsigned constant here: "1 << 31" on a signed int
+	     * is undefined behaviour when the top bit is set
+	     */
+	    map &= ~(1U << (i-1));
+	}
+    }
+}
+
+/*
+ * Discover NUMA topology from /sys/devices/system/node: record for
+ * each cpu the node it belongs to (-1 when unknown / non-NUMA), and
+ * size and populate the NODE_INDOM instance domain.
+ */
+static void
+map_cpu_nodes(proc_cpuinfo_t *proc_cpuinfo)
+{
+    int i, j;
+    const char *node_path = "sys/devices/system/node";
+    char path[MAXPATHLEN];
+    char cpumap[4096];
+    DIR *nodes;
+    FILE *f;
+    struct dirent *de;
+    int node, max_node = -1;
+    char *cp;
+    pmdaIndom *idp = PMDAINDOM(NODE_INDOM);
+
+    for (i = 0; i < proc_cpuinfo->cpuindom->it_numinst; i++)
+	proc_cpuinfo->cpuinfo[i].node = -1;
+
+    snprintf(path, sizeof(path), "%s/%s", linux_statspath, node_path);
+    if ((nodes = opendir(path)) == NULL)
+	return;		/* kernel exposes no NUMA information */
+
+    while ((de = readdir(nodes)) != NULL) {
+	if (sscanf(de->d_name, "node%d", &node) != 1)
+	    continue;	/* skip ".", "..", "has_cpu", etc */
+
+	if (node > max_node)
+	    max_node = node;
+
+	snprintf(path, sizeof(path), "%s/%s/%s/cpumap",
+		linux_statspath, node_path, de->d_name);
+	if ((f = fopen(path, "r")) == NULL)
+	    continue;
+	i = fscanf(f, "%s", cpumap);
+	fclose(f);
+	if (i != 1)
+	    continue;
+
+	/*
+	 * cpumap is comma-separated 32-bit hex words, most significant
+	 * first; decode from the rightmost (lowest cpus) word backward
+	 */
+	for (j = 0; (cp = strrchr(cpumap, ',')); j++) {
+	    decode_map(proc_cpuinfo, cp+1, node, j);
+	    *cp = '\0';
+	}
+	decode_map(proc_cpuinfo, cpumap, node, j);
+    }
+    closedir(nodes);
+
+    /* initialize node indom */
+    idp->it_set = calloc(max_node + 1, sizeof(pmdaInstid));
+    if (idp->it_set == NULL) {
+	/* allocation failed - leave the node indom empty rather than
+	 * dereferencing a NULL instance table below */
+	idp->it_numinst = 0;
+	proc_cpuinfo->node_indom = idp;
+	return;
+    }
+    idp->it_numinst = max_node + 1;
+    for (i = 0; i <= max_node; i++) {
+	char node_name[256];
+
+	snprintf(node_name, sizeof(node_name), "node%d", i);
+	idp->it_set[i].i_inst = i;
+	idp->it_set[i].i_name = strdup(node_name);
+    }
+    proc_cpuinfo->node_indom = idp;
+}
+
+/*
+ * Return a strdup'd external instance name ("cpuN") for cpu c;
+ * the caller owns (and must eventually free) the returned string.
+ *
+ * On first call only, also performs one-off platform detection:
+ * reads the SGI prom version file (when present) to derive a
+ * machine string such as "IPxx", falling back to "linux".
+ */
+char *
+cpu_name(proc_cpuinfo_t *proc_cpuinfo, int c)
+{
+    char name[1024];	/* reused: first as file path/line buffer, then as the cpu name */
+    char *p;
+    FILE *f;
+    static int started = 0;
+
+    if (!started) {
+	refresh_proc_cpuinfo(proc_cpuinfo);
+
+	proc_cpuinfo->machine = NULL;
+	f = linux_statsfile("/proc/sgi_prominfo/node0/version", name, sizeof(name));
+	if (f != NULL) {
+	    while (fgets(name, sizeof(name), f)) {
+		if (strncmp(name, "SGI", 3) == 0) {
+		    /* extract the "IPxx" token following "SGI ..." */
+		    if ((p = strstr(name, " IP")) != NULL)
+			proc_cpuinfo->machine = strndup(p+1, 4);
+		    break;
+		}
+	    }
+	    fclose(f);
+	}
+	if (proc_cpuinfo->machine == NULL)
+	    proc_cpuinfo->machine = strdup("linux");
+
+	started = 1;
+    }
+
+    snprintf(name, sizeof(name), "cpu%d", c);
+    return strdup(name);
+}
+
+/*
+ * Parse /proc/cpuinfo, populating the per-cpu cpuinfo[] table.
+ * String-valued fields (vendor, model, flags, ...) are stored as
+ * keys into the shared strings dictionary (linux_strings_insert);
+ * -1 in those fields means "no value seen yet".
+ *
+ * Returns 0 on success, negative errno on failure to open the file.
+ */
+int
+refresh_proc_cpuinfo(proc_cpuinfo_t *proc_cpuinfo)
+{
+    char buf[4096];
+    FILE *fp;
+    int cpunum;
+    cpuinfo_t *info;
+    char *val;
+    char *p;
+    static int started = 0;	/* 0: never run, 1: table alloc'd, 2: nodes mapped */
+
+    if (!started) {
+	/* one-off allocation of the per-cpu table, sized by the cpu indom */
+	int need = proc_cpuinfo->cpuindom->it_numinst * sizeof(cpuinfo_t);
+	proc_cpuinfo->cpuinfo = (cpuinfo_t *)calloc(1, need);
+	for (cpunum=0; cpunum < proc_cpuinfo->cpuindom->it_numinst; cpunum++) {
+	    proc_cpuinfo->cpuinfo[cpunum].sapic = -1;
+	    proc_cpuinfo->cpuinfo[cpunum].vendor = -1;
+	    proc_cpuinfo->cpuinfo[cpunum].model = -1;
+	    proc_cpuinfo->cpuinfo[cpunum].model_name = -1;
+	    proc_cpuinfo->cpuinfo[cpunum].stepping = -1;
+	    proc_cpuinfo->cpuinfo[cpunum].flags = -1;
+	}
+	started = 1;
+    }
+
+    if ((fp = linux_statsfile("/proc/cpuinfo", buf, sizeof(buf))) == NULL)
+	return -oserror();
+
+#if defined(HAVE_ALPHA_LINUX)
+    /* Alpha: a single stanza describes all (identical) processors */
+    cpunum = 0;
+#else //intel
+    /* elsewhere: each "processor :" line starts a new cpu stanza */
+    cpunum = -1;
+#endif
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+	if ((val = strrchr(buf, '\n')) != NULL)
+	    *val = '\0';
+	if ((val = strchr(buf, ':')) == NULL)
+	    continue;
+	val += 2;	/* skip ": " to reach the value text */
+
+#if !defined(HAVE_ALPHA_LINUX)
+	if (strncmp(buf, "processor", 9) == 0) {
+	    cpunum++;
+	    proc_cpuinfo->cpuinfo[cpunum].cpu_num = atoi(val);
+	    continue;
+	}
+#endif
+
+	if (cpunum < 0 || cpunum >= proc_cpuinfo->cpuindom->it_numinst)
+	    continue;
+
+	info = &proc_cpuinfo->cpuinfo[cpunum];
+
+	/* note: order is important due to strNcmp comparisons */
+	if (info->sapic < 0 && strncasecmp(buf, "sapic", 5) == 0)
+	    info->sapic = linux_strings_insert(val);
+	else if (info->model_name < 0 && strncasecmp(buf, "model name", 10) == 0)
+	    info->model_name = linux_strings_insert(val);
+	else if (info->model < 0 && strncasecmp(buf, "model", 5) == 0)
+	    info->model = linux_strings_insert(val);
+	else if (info->model < 0 && strncasecmp(buf, "cpu model", 9) == 0)
+	    info->model = linux_strings_insert(val);
+	else if (info->vendor < 0 && strncasecmp(buf, "vendor", 6) == 0)
+	    info->vendor = linux_strings_insert(val);
+	else if (info->stepping < 0 && strncasecmp(buf, "step", 4) == 0)
+	    info->stepping = linux_strings_insert(val);
+	else if (info->stepping < 0 && strncasecmp(buf, "revision", 8) == 0)
+	    info->stepping = linux_strings_insert(val);
+	else if (info->stepping < 0 && strncasecmp(buf, "cpu revision", 12) == 0)
+	    info->stepping = linux_strings_insert(val);
+	else if (info->flags < 0 && strncasecmp(buf, "flags", 5) == 0)
+	    info->flags = linux_strings_insert(val);
+	else if (info->flags < 0 && strncasecmp(buf, "features", 8) == 0)
+	    info->flags = linux_strings_insert(val);
+	else if (info->cache == 0 && strncasecmp(buf, "cache size", 10) == 0)
+	    info->cache = atoi(val);
+	else if (info->cache_align == 0 && strncasecmp(buf, "cache_align", 11) == 0)
+	    info->cache_align = atoi(val);
+	else if (info->bogomips == 0.0 && strncasecmp(buf, "bogo", 4) == 0)
+	    info->bogomips = atof(val);
+	else if (info->clock == 0.0 && strncasecmp(buf, "cpu MHz", 7) == 0)
+	    info->clock = atof(val);
+	else if (info->clock == 0.0 && strncasecmp(buf, "cycle frequency", 15) == 0) {
+	    /* Alpha reports Hz ("NNN est."); strip the suffix, scale to MHz */
+	    if ((p = strchr(val, ' ')) != NULL)
+		*p = '\0';
+	    info->clock = (atof(val))/1000000;
+	}
+    }
+    fclose(fp);
+
+#if defined(HAVE_ALPHA_LINUX)
+    /* all processors are identical, therefore duplicate it to all the instances */
+    for (cpunum = 1; cpunum < proc_cpuinfo->cpuindom->it_numinst; cpunum++)
+	memcpy(&proc_cpuinfo->cpuinfo[cpunum], info, sizeof(cpuinfo_t));
+#endif
+
+    if (started < 2) {
+	/* NUMA node mapping is done once, after the first full parse */
+	map_cpu_nodes(proc_cpuinfo);
+	started = 2;
+    }
+
+    /* success */
+    return 0;
+}
diff --git a/src/pmdas/linux/proc_cpuinfo.h b/src/pmdas/linux/proc_cpuinfo.h
new file mode 100644
index 0000000..b0691b4
--- /dev/null
+++ b/src/pmdas/linux/proc_cpuinfo.h
@@ -0,0 +1,49 @@
+/*
+ * Linux /proc/cpuinfo metrics cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2001 Gilly Ran (gilly@exanet.com) for the
+ * portions of the code supporting the Alpha platform.
+ * All rights reserved.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/* Alpha needs special /proc/cpuinfo handling (single shared stanza) */
+#ifdef __alpha__
+#define HAVE_ALPHA_LINUX
+#endif
+
+/* per-cpu data parsed from /proc/cpuinfo and sysfs node maps */
+typedef struct {
+    int cpu_num;	/* kernel cpu number from the "processor" line */
+    int node;		/* owning NUMA node, -1 when unknown */
+    char *name;
+    float clock;	/* MHz */
+    float bogomips;
+    int sapic;		/* strings dictionary hash key */
+    int vendor;		/* strings dictionary hash key */
+    int model;		/* strings dictionary hash key */
+    int model_name;	/* strings dictionary hash key */
+    int stepping;	/* strings dictionary hash key */
+    int flags;		/* strings dictionary hash key */
+    unsigned int cache;		/* cache size, KB */
+    unsigned int cache_align;
+} cpuinfo_t;
+
+/* overall cpuinfo cluster state: machine id plus per-cpu table and indoms */
+typedef struct {
+    char *machine;
+    cpuinfo_t *cpuinfo;
+    pmdaIndom *cpuindom;
+    pmdaIndom *node_indom;
+} proc_cpuinfo_t;
+
+extern int refresh_proc_cpuinfo(proc_cpuinfo_t *);
+extern char *cpu_name(proc_cpuinfo_t *, int);
diff --git a/src/pmdas/linux/proc_loadavg.c b/src/pmdas/linux/proc_loadavg.c
new file mode 100644
index 0000000..676cfc6
--- /dev/null
+++ b/src/pmdas/linux/proc_loadavg.c
@@ -0,0 +1,45 @@
+/*
+ * Linux /proc/loadavg metrics cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_loadavg.h"
+
+/*
+ * Read and parse /proc/loadavg into proc_loadavg.
+ * Returns 0 on success, negative errno on failure.
+ */
+int
+refresh_proc_loadavg(proc_loadavg_t *proc_loadavg)
+{
+    char buf[1024];
+    FILE *fp;
+
+    if ((fp = linux_statsfile("/proc/loadavg", buf, sizeof(buf))) == NULL)
+	return -oserror();
+
+    if (fgets(buf, sizeof(buf), fp) == NULL) {
+	/*
+	 * capture errno before fclose() can clobber it; previously the
+	 * stream was leaked on this error path
+	 */
+	int sts = -oserror();
+	fclose(fp);
+	return sts;
+    }
+    fclose(fp);
+
+    /*
+     * 0.00 0.00 0.05 1/67 17563
+     * Lastpid added by Mike Mason <mmlnx@us.ibm.com>
+     */
+    sscanf((const char *)buf, "%f %f %f %u/%u %u",
+	&proc_loadavg->loadavg[0], &proc_loadavg->loadavg[1],
+	&proc_loadavg->loadavg[2], &proc_loadavg->runnable,
+	&proc_loadavg->nprocs, &proc_loadavg->lastpid);
+    return 0;
+}
diff --git a/src/pmdas/linux/proc_loadavg.h b/src/pmdas/linux/proc_loadavg.h
new file mode 100644
index 0000000..15152c8
--- /dev/null
+++ b/src/pmdas/linux/proc_loadavg.h
@@ -0,0 +1,29 @@
+/*
+ * Linux /proc/stat metrics cluster
+ *
+ * Copyright (c) 1995 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* values parsed from a single line of /proc/loadavg */
+typedef struct {
+    float loadavg[3]; 	/* 1, 5 and 15 min load average */
+    unsigned int runnable;	/* currently runnable tasks */
+    unsigned int nprocs;	/* total tasks */
+    unsigned int lastpid;	/* most recently allocated pid */
+} proc_loadavg_t;
+
+extern int refresh_proc_loadavg(proc_loadavg_t *);
+
diff --git a/src/pmdas/linux/proc_meminfo.c b/src/pmdas/linux/proc_meminfo.c
new file mode 100644
index 0000000..471e2ce
--- /dev/null
+++ b/src/pmdas/linux/proc_meminfo.c
@@ -0,0 +1,188 @@
+/*
+ * Linux /proc/meminfo metrics cluster
+ *
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <ctype.h>
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include <sys/stat.h>
+#include "proc_meminfo.h"
+
+/* template instance used only to compute per-field offsets (MOFFSET) */
+static proc_meminfo_t moff;
+extern size_t _pm_system_pagesize;
+
+/*
+ * Mapping from /proc/meminfo field names to proc_meminfo_t slots.
+ * Matching is by exact strcmp on the text before the ':'.
+ */
+static struct {
+    char *field;
+    int64_t *offset;
+} meminfo_fields[] = {
+    { "MemTotal", &moff.MemTotal },
+    { "MemFree", &moff.MemFree },
+    { "MemAvailable", &moff.MemAvailable },
+    { "MemShared", &moff.MemShared },
+    { "Buffers", &moff.Buffers },
+    { "Cached", &moff.Cached },
+    { "SwapCached", &moff.SwapCached },
+    { "Active", &moff.Active },
+    { "Inactive", &moff.Inactive },
+    { "Active(anon)", &moff.Active_anon },
+    { "Inactive(anon)", &moff.Inactive_anon },
+    { "Active(file)", &moff.Active_file },
+    { "Inactive(file)", &moff.Inactive_file },
+    { "Unevictable", &moff.Unevictable },
+    { "Mlocked", &moff.Mlocked },
+    { "HighTotal", &moff.HighTotal },
+    { "HighFree", &moff.HighFree },
+    { "LowTotal", &moff.LowTotal },
+    { "LowFree", &moff.LowFree },
+    { "MmapCopy", &moff.MmapCopy },
+    { "SwapTotal", &moff.SwapTotal },
+    { "SwapFree", &moff.SwapFree },
+    { "Dirty", &moff.Dirty },
+    { "Writeback", &moff.Writeback },
+    { "AnonPages", &moff.AnonPages },
+    { "Mapped", &moff.Mapped },
+    { "Shmem", &moff.Shmem },
+    { "Slab", &moff.Slab },
+    { "SReclaimable", &moff.SlabReclaimable },
+    { "SUnreclaim", &moff.SlabUnreclaimable },
+    { "KernelStack", &moff.KernelStack },
+    { "PageTables", &moff.PageTables },
+    { "Quicklists", &moff.Quicklists },
+    { "NFS_Unstable", &moff.NFS_Unstable },
+    { "Bounce", &moff.Bounce },
+    { "WritebackTmp", &moff.WritebackTmp },
+    { "CommitLimit", &moff.CommitLimit },
+    { "Committed_AS", &moff.Committed_AS },
+    { "VmallocTotal", &moff.VmallocTotal },
+    { "VmallocUsed", &moff.VmallocUsed },
+    { "VmallocChunk", &moff.VmallocChunk },
+    { "HardwareCorrupted", &moff.HardwareCorrupted },
+    { "AnonHugePages", &moff.AnonHugePages },
+    /* vendor kernel patches, some outdated now */
+    /* (duplicate "MemShared" entry removed - already listed above) */
+    { "ReverseMaps", &moff.ReverseMaps },
+    { "HugePages_Total", &moff.HugepagesTotal },
+    { "HugePages_Free", &moff.HugepagesFree },
+    { "HugePages_Rsvd", &moff.HugepagesRsvd },
+    { "HugePages_Surp", &moff.HugepagesSurp },
+    { "DirectMap4k", &moff.directMap4k },
+    { "DirectMap2M", &moff.directMap2M },
+    { "DirectMap1G", &moff.directMap1G },
+    { NULL, NULL }
+};
+
+/* translate a table entry's offset-in-moff into a pointer into pp */
+#define MOFFSET(ii, pp) (int64_t *)((char *)pp + \
+    (__psint_t)meminfo_fields[ii].offset - (__psint_t)&moff)
+
+/*
+ * Read and parse /proc/meminfo; every table field is first reset to -1
+ * ("no value available") and values are converted from KB to bytes.
+ *
+ * When the kernel does not report MemAvailable (pre-3.x), estimate it
+ * from other fields, mirroring upstream kernel commit 34e431b0ae.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+int
+refresh_proc_meminfo(proc_meminfo_t *proc_meminfo)
+{
+    char buf[1024];
+    char *bufp;
+    int64_t *p;
+    int i;
+    FILE *fp;
+
+    for (i = 0; meminfo_fields[i].field != NULL; i++) {
+	p = MOFFSET(i, proc_meminfo);
+	*p = -1; /* marked as "no value available" */
+    }
+
+    if ((fp = linux_statsfile("/proc/meminfo", buf, sizeof(buf))) == NULL)
+	return -oserror();
+
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+	if ((bufp = strchr(buf, ':')) == NULL)
+	    continue;
+	*bufp = '\0';
+	for (i=0; meminfo_fields[i].field != NULL; i++) {
+	    if (strcmp(buf, meminfo_fields[i].field) != 0)
+		continue;
+	    p = MOFFSET(i, proc_meminfo);
+	    /* skip to the first digit after the ':' before scanning */
+	    for (bufp++; *bufp; bufp++) {
+		if (isdigit((int)*bufp)) {
+		    sscanf(bufp, "%llu", (unsigned long long *)p);
+		    *p *= 1024; /* kbytes -> bytes */
+		    break;
+		}
+	    }
+	}
+    }
+
+    fclose(fp);
+
+    /*
+     * MemAvailable is only in 3.x or later kernels but we can calculate it
+     * using other values, similar to upstream kernel commit 34e431b0ae.
+     * The environment variable is for QA purposes.
+     */
+    if (!MEMINFO_VALID_VALUE(proc_meminfo->MemAvailable) ||
+	getenv("PCP_QA_ESTIMATE_MEMAVAILABLE") != NULL) {
+	if (MEMINFO_VALID_VALUE(proc_meminfo->MemTotal) &&
+	    MEMINFO_VALID_VALUE(proc_meminfo->MemFree) &&
+	    MEMINFO_VALID_VALUE(proc_meminfo->Active_file) &&
+	    MEMINFO_VALID_VALUE(proc_meminfo->Inactive_file) &&
+	    MEMINFO_VALID_VALUE(proc_meminfo->SlabReclaimable)) {
+
+	    int64_t pagecache;
+	    int64_t wmark_low = 0;
+
+	    /*
+	     * sum for each zone->watermark[WMARK_LOW];
+	     * use linux_statsfile() (not a raw fopen) so the QA
+	     * stats-path redirection works here too, consistent
+	     * with every other /proc read in this PMDA
+	     */
+	    if ((fp = linux_statsfile("/proc/zoneinfo", buf, sizeof(buf))) != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+		    if ((bufp = strstr(buf, "low ")) != NULL) {
+			int64_t low;
+			if (sscanf(bufp+4, "%lld", (long long int *)&low) == 1)
+			    wmark_low += low;
+		    }
+		}
+		fclose(fp);
+		wmark_low *= _pm_system_pagesize;	/* pages -> bytes */
+	    }
+
+	    /*
+	     * Free memory cannot be taken below the low watermark, before the
+	     * system starts swapping.
+	     */
+	    proc_meminfo->MemAvailable = proc_meminfo->MemFree - wmark_low;
+
+	    /*
+	     * Not all the page cache can be freed, otherwise the system will
+	     * start swapping. Assume at least half of the page cache, or the
+	     * low watermark worth of cache, needs to stay.
+	     */
+	    pagecache = proc_meminfo->Active_file + proc_meminfo->Inactive_file;
+	    pagecache -= MIN(pagecache / 2, wmark_low);
+	    proc_meminfo->MemAvailable += pagecache;
+
+	    /*
+	     * Part of the reclaimable slab consists of items that are in use,
+	     * and cannot be freed. Cap this estimate at the low watermark.
+	     */
+	    proc_meminfo->MemAvailable += proc_meminfo->SlabReclaimable;
+	    proc_meminfo->MemAvailable -= MIN(proc_meminfo->SlabReclaimable / 2, wmark_low);
+
+	    if (proc_meminfo->MemAvailable < 0)
+		proc_meminfo->MemAvailable = 0;
+	}
+    }
+
+    /* success */
+    return 0;
+}
diff --git a/src/pmdas/linux/proc_meminfo.h b/src/pmdas/linux/proc_meminfo.h
new file mode 100644
index 0000000..3185dcc
--- /dev/null
+++ b/src/pmdas/linux/proc_meminfo.h
@@ -0,0 +1,79 @@
+/*
+ * Linux /proc/meminfo metrics cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/* fields are initialised to -1 meaning "not reported by this kernel" */
+#define MEMINFO_VALID_VALUE(x)	((x) != (int64_t)-1)
+#define MEMINFO_VALUE_OR_ZERO(x) (((x) == (int64_t)-1) ? 0 : (x))
+
+/*
+ * All fields in /proc/meminfo
+ * (values stored in bytes, converted from the kernel's KB units)
+ */
+typedef struct {
+    int64_t MemTotal;
+    int64_t MemFree;
+    int64_t MemAvailable;	/* kernel-reported, else estimated */
+    int64_t MemShared;
+    int64_t Buffers;
+    int64_t Cached;
+    int64_t SwapCached;
+    int64_t Active;
+    int64_t Inactive;
+    int64_t Active_anon;
+    int64_t Inactive_anon;
+    int64_t Active_file;
+    int64_t Inactive_file;
+    int64_t Unevictable;
+    int64_t Mlocked;
+    int64_t HighTotal;
+    int64_t HighFree;
+    int64_t LowTotal;
+    int64_t LowFree;
+    int64_t MmapCopy;
+    int64_t SwapTotal;
+    int64_t SwapFree;
+    int64_t SwapUsed;	/* computed */
+    int64_t Dirty;
+    int64_t Writeback;
+    int64_t Mapped;
+    int64_t Shmem;
+    int64_t Slab;
+    int64_t SlabReclaimable;
+    int64_t SlabUnreclaimable;
+    int64_t KernelStack;
+    int64_t CommitLimit;
+    int64_t Committed_AS;
+    int64_t PageTables;
+    int64_t Quicklists;
+    int64_t ReverseMaps;
+    int64_t AnonPages;
+    int64_t Bounce;
+    int64_t NFS_Unstable;
+    int64_t WritebackTmp;
+    int64_t VmallocTotal;
+    int64_t VmallocUsed;
+    int64_t VmallocChunk;
+    int64_t HardwareCorrupted;
+    int64_t AnonHugePages;
+    int64_t HugepagesTotal;	/* huge page counts, not bytes */
+    int64_t HugepagesFree;
+    int64_t HugepagesRsvd;
+    int64_t HugepagesSurp;
+    int64_t directMap4k;
+    int64_t directMap2M;
+    int64_t directMap1G;
+} proc_meminfo_t;
+
+extern int refresh_proc_meminfo(proc_meminfo_t *);
diff --git a/src/pmdas/linux/proc_net_dev.c b/src/pmdas/linux/proc_net_dev.c
new file mode 100644
index 0000000..93e3057
--- /dev/null
+++ b/src/pmdas/linux/proc_net_dev.c
@@ -0,0 +1,444 @@
+/*
+ * Linux /proc/net/dev metrics cluster
+ *
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 1995,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <net/if.h>
+#include <ctype.h>
+#include "proc_net_dev.h"
+
+/*
+ * Return a cached AF_INET datagram socket used for interface ioctls.
+ * Opened on first use and deliberately kept open for the life of the
+ * process; returns -1 if the socket cannot be created.
+ * (proper prototype: "(void)" rather than the old-style "()")
+ */
+static int
+refresh_inet_socket(void)
+{
+    static int netfd = -1;
+    if (netfd < 0)
+	netfd = socket(AF_INET, SOCK_DGRAM, 0);
+    return netfd;
+}
+
+/*
+ * Fetch per-interface state not exported via /proc/net/dev using
+ * ioctls: MTU (SIOCGIFMTU), up/running flags (SIOCGIFFLAGS) and
+ * speed/duplex (SIOCETHTOOL).  Returns 0 when the ethtool ioctl
+ * succeeded, -ENOSYS when the caller should fall back to sysfs.
+ */
+static int
+refresh_net_dev_ioctl(char *name, net_interface_t *netip)
+{
+    struct ethtool_cmd ecmd = { 0 };
+    /*
+     * Note:
+     * Initialization of ecmd is not really needed.  If the ioctl()s
+     * work, ecmd is filled in ... but valgrind (at least up to
+     * version 3.9.0) does not know about the SIOCETHTOOL ioctl()
+     * and thinks the use of ecmd after this call propagates
+     * uninitialized data in to ioc.speed and ioc.duplex, causing
+     * failures for qa/957
+     * - Ken McDonell, 11 Apr 2014
+     */
+    struct ifreq ifr;
+    int fd;
+
+    if ((fd = refresh_inet_socket()) < 0)
+	return 0;	/* no socket: report nothing, but don't force sysfs fallback */
+
+    ecmd.cmd = ETHTOOL_GSET;
+    ifr.ifr_data = (caddr_t)&ecmd;
+    strncpy(ifr.ifr_name, name, IF_NAMESIZE);
+    ifr.ifr_name[IF_NAMESIZE-1] = '\0';
+    if (!(ioctl(fd, SIOCGIFMTU, &ifr) < 0))
+	netip->ioc.mtu = ifr.ifr_mtu;
+
+    ecmd.cmd = ETHTOOL_GSET;
+    ifr.ifr_data = (caddr_t)&ecmd;
+    strncpy(ifr.ifr_name, name, IF_NAMESIZE);
+    ifr.ifr_name[IF_NAMESIZE-1] = '\0';
+    if (!(ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)) {
+	netip->ioc.linkup = !!(ifr.ifr_flags & IFF_UP);
+	netip->ioc.running = !!(ifr.ifr_flags & IFF_RUNNING);
+    }
+    /* ETHTOOL ioctl -> non-root permissions issues for old kernels */
+    ecmd.cmd = ETHTOOL_GSET;
+    ifr.ifr_data = (caddr_t)&ecmd;
+    strncpy(ifr.ifr_name, name, IF_NAMESIZE);
+    ifr.ifr_name[IF_NAMESIZE-1] = '\0';
+    if (!(ioctl(fd, SIOCETHTOOL, &ifr) < 0)) {
+	/*
+	 * speed is defined in ethtool.h and returns the speed in
+	 * Mbps, so 100 for 100Mbps, 1000 for 1Gbps, etc
+	 */
+	netip->ioc.speed = ecmd.speed;
+	netip->ioc.duplex = ecmd.duplex + 1;	/* 0 reserved for "unknown" */
+	return 0;
+    }
+    return -ENOSYS;	/* caller should try ioctl alternatives */
+}
+
+/*
+ * Look up the primary IPv4 address of interface "name" via
+ * SIOCGIFADDR and record its dotted-quad form in addr->inet,
+ * setting has_inet on success.  Silently does nothing on failure.
+ */
+static void
+refresh_net_ipv4_addr(char *name, net_addr_t *addr)
+{
+    struct ifreq ifr;
+    int fd;
+
+    if ((fd = refresh_inet_socket()) < 0)
+	return;
+    strncpy(ifr.ifr_name, name, IF_NAMESIZE);
+    ifr.ifr_name[IF_NAMESIZE-1] = '\0';
+    ifr.ifr_addr.sa_family = AF_INET;
+    if (ioctl(fd, SIOCGIFADDR, &ifr) >= 0) {
+	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+	if (inet_ntop(AF_INET, &sin->sin_addr, addr->inet, INET_ADDRSTRLEN))
+	    addr->has_inet = 1;
+    }
+}
+
+/*
+ * No ioctl support or no permissions (more likely), so we
+ * fall back to grovelling about in /sys/class/net in a last
+ * ditch attempt to find the ethtool interface data (duplex
+ * and speed).
+ */
+/*
+ * Read the first whitespace-delimited token (max 63 chars) from the
+ * file at path into buffer; returns buffer on success, "" otherwise.
+ */
+static char *
+read_oneline(const char *path, char *buffer)
+{
+    FILE *file;
+
+    if ((file = fopen(path, "r")) == NULL)
+	return "";
+    if (fscanf(file, "%63s", buffer) != 1) {
+	fclose(file);
+	return "";
+    }
+    fclose(file);
+    return buffer;
+}
+
+/*
+ * Sysfs fallback for speed/duplex when the ethtool ioctl fails:
+ * reads /sys/class/net/<name>/{speed,duplex}.  Missing/unreadable
+ * files yield speed 0 and duplex 0 ("unknown").
+ */
+static void
+refresh_net_dev_sysfs(char *name, net_interface_t *netip)
+{
+    char path[MAXPATHLEN];
+    char line[64];
+    char *duplex;
+
+    snprintf(path, sizeof(path), "%s/sys/class/net/%s/speed", linux_statspath, name);
+    path[sizeof(path)-1] = '\0';
+    netip->ioc.speed = atoi(read_oneline(path, line));	/* atoi("") == 0 */
+
+    snprintf(path, sizeof(path), "%s/sys/class/net/%s/duplex", linux_statspath, name);
+    path[sizeof(path)-1] = '\0';
+    duplex = read_oneline(path, line);
+
+    /* encoding matches the ioctl path: 2 = full, 1 = half, 0 = unknown */
+    if (strcmp(duplex, "full") == 0)
+	netip->ioc.duplex = 2;
+    else if (strcmp(duplex, "half") == 0)
+	netip->ioc.duplex = 1;
+    else /* eh? */
+	netip->ioc.duplex = 0;
+}
+
+/*
+ * Read the hardware (MAC) address of interface "name" from
+ * /sys/class/net/<name>/address into netip->hw_addr, setting
+ * has_hw when a non-empty value was found.
+ */
+static void
+refresh_net_hw_addr(char *name, net_addr_t *netip)
+{
+    char path[MAXPATHLEN];
+    char line[64];
+    char *value;
+
+    snprintf(path, sizeof(path), "%s/sys/class/net/%s/address", linux_statspath, name);
+    path[sizeof(path)-1] = '\0';
+
+    value = read_oneline(path, line);
+
+    if (value[0] != '\0')
+	netip->has_hw = 1;
+    /* always copy: an empty read clears any stale address */
+    strncpy(netip->hw_addr, value, sizeof(netip->hw_addr));
+    netip->hw_addr[sizeof(netip->hw_addr)-1] = '\0';
+}
+
+/*
+ * Parse /proc/net/dev, maintaining one net_interface_t per interface
+ * in the pmdaCache for indom.  Counters are accumulated as 64-bit
+ * values with 32-bit wrap detection; a per-call generation number
+ * detects interfaces that disappeared and came back (whose kernel
+ * counters restart at zero).  Returns 0 on success, negative errno
+ * if /proc/net/dev cannot be opened.
+ */
+int
+refresh_proc_net_dev(pmInDom indom)
+{
+    char buf[1024];
+    FILE *fp;
+    unsigned long long llval;
+    char *p, *v;
+    int j, sts;
+    net_interface_t *netip;
+
+    static uint64_t gen; /* refresh generation number */
+    static uint32_t cache_err;	/* rate-limits cache error reporting */
+
+    if ((fp = linux_statsfile("/proc/net/dev", buf, sizeof(buf))) == NULL)
+	return -oserror();
+
+    if (gen == 0) {
+	/*
+	 * first time, reload cache from external file, and force any
+	 * subsequent changes to be saved
+	 */
+	pmdaCacheOp(indom, PMDA_CACHE_LOAD);
+    }
+    gen++;
+
+    /*
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 4060748 39057 0 0 0 0 0 0 4060748 39057 0 0 0 0 0 0
+ eth0: 0 337614 0 0 0 0 0 0 0 267537 0 0 0 27346 62 0
+ */
+
+    pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+	/* v remembers the ':' so counter parsing can restart there */
+	if ((p = v = strchr(buf, ':')) == NULL)
+	    continue;
+	*p = '\0';
+	for (p=buf; *p && isspace((int)*p); p++) {;}
+
+	sts = pmdaCacheLookupName(indom, p, NULL, (void **)&netip);
+	if (sts == PM_ERR_INST || (sts >= 0 && netip == NULL)) {
+	    /* first time since re-loaded, else new one */
+	    /* NOTE(review): calloc result is not checked; a failure here
+	     * would deref NULL at netip->last_gen below - confirm upstream */
+	    netip = (net_interface_t *)calloc(1, sizeof(net_interface_t));
+#if PCP_DEBUG
+	    if (pmDebug & DBG_TRACE_LIBPMDA) {
+		fprintf(stderr, "refresh_proc_net_dev: initialize \"%s\"\n", p);
+	    }
+#endif
+	}
+	else if (sts < 0) {
+	    if (cache_err++ < 10) {
+		fprintf(stderr, "refresh_proc_net_dev: pmdaCacheLookupName(%s, %s, ...) failed: %s\n",
+		    pmInDomStr(indom), p, pmErrStr(sts));
+	    }
+	    continue;
+	}
+	if (netip->last_gen != gen-1) {
+	    /*
+	     * rediscovered one that went away and has returned
+	     *
+	     * kernel counters are reset, so clear last_counters to
+	     * avoid false overflows
+	     */
+	    for (j=0; j < PROC_DEV_COUNTERS_PER_LINE; j++) {
+		netip->last_counters[j] = 0;
+	    }
+	}
+	netip->last_gen = gen;
+	if ((sts = pmdaCacheStore(indom, PMDA_CACHE_ADD, p, (void *)netip)) < 0) {
+	    if (cache_err++ < 10) {
+		fprintf(stderr, "refresh_proc_net_dev: pmdaCacheStore(%s, PMDA_CACHE_ADD, %s, " PRINTF_P_PFX "%p) failed: %s\n",
+		    pmInDomStr(indom), p, netip, pmErrStr(sts));
+	    }
+	    continue;
+	}
+
+	/* Issue ioctls for remaining data, not exported through proc */
+	memset(&netip->ioc, 0, sizeof(netip->ioc));
+	if (refresh_net_dev_ioctl(p, netip) < 0)
+	    refresh_net_dev_sysfs(p, netip);
+
+	/* accumulate the 16 counters, handling 32-bit kernel wrap */
+	for (p=v, j=0; j < PROC_DEV_COUNTERS_PER_LINE; j++) {
+	    for (; !isdigit((int)*p); p++) {;}
+	    sscanf(p, "%llu", &llval);
+	    if (llval >= netip->last_counters[j]) {
+		netip->counters[j] +=
+		    llval - netip->last_counters[j];
+	    }
+	    else {
+	    	/* 32bit counter has wrapped */
+		netip->counters[j] +=
+		    llval + (UINT_MAX - netip->last_counters[j]);
+	    }
+	    netip->last_counters[j] = llval;
+	    for (; !isspace((int)*p); p++) {;}
+	}
+    }
+
+    pmdaCacheOp(indom, PMDA_CACHE_SAVE);
+
+    /* success */
+    fclose(fp);
+    return 0;
+}
+
+/*
+ * Enumerate all IPv4-configured interfaces (including aliases) via
+ * SIOCGIFCONF, growing the request buffer until everything fits,
+ * and refresh each one's IPv4 and hardware addresses in the cache.
+ * Returns 0 on success, negative errno on socket/ioctl failure.
+ */
+static int
+refresh_net_dev_ipv4_addr(pmInDom indom)
+{
+    int n, fd, sts, numreqs = 30;
+    struct ifconf ifc;
+    struct ifreq *ifr;
+    net_addr_t *netip;
+    static uint32_t cache_err;	/* rate-limits cache error reporting */
+
+    if ((fd = refresh_inet_socket()) < 0)
+	return fd;
+
+    ifc.ifc_buf = NULL;
+    for (;;) {
+	ifc.ifc_len = sizeof(struct ifreq) * numreqs;
+	/* NOTE(review): realloc return unchecked - confirm acceptable here */
+	ifc.ifc_buf = realloc(ifc.ifc_buf, ifc.ifc_len);
+
+	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
+	    free(ifc.ifc_buf);
+	    return -oserror();
+	}
+	if (ifc.ifc_len == sizeof(struct ifreq) * numreqs) {
+	    /* assume it overflowed and try again */
+	    numreqs *= 2;
+	    continue;
+	}
+	break;
+    }
+
+    for (n = 0, ifr = ifc.ifc_req;
+	 n < ifc.ifc_len;
+	 n += sizeof(struct ifreq), ifr++) {
+	sts = pmdaCacheLookupName(indom, ifr->ifr_name, NULL, (void **)&netip);
+	if (sts == PM_ERR_INST || (sts >= 0 && netip == NULL)) {
+	    /* first time since re-loaded, else new one */
+	    netip = (net_addr_t *)calloc(1, sizeof(net_addr_t));
+	}
+	else if (sts < 0) {
+	    if (cache_err++ < 10) {
+		fprintf(stderr, "refresh_net_dev_ipv4_addr: "
+			"pmdaCacheLookupName(%s, %s, ...) failed: %s\n",
+		    pmInDomStr(indom), ifr->ifr_name, pmErrStr(sts));
+	    }
+	    continue;
+	}
+	if ((sts = pmdaCacheStore(indom, PMDA_CACHE_ADD, ifr->ifr_name, (void *)netip)) < 0) {
+	    if (cache_err++ < 10) {
+		fprintf(stderr, "refresh_net_dev_ipv4_addr: "
+			"pmdaCacheStore(%s, PMDA_CACHE_ADD, %s, "
+			PRINTF_P_PFX "%p) failed: %s\n",
+		    pmInDomStr(indom), ifr->ifr_name, netip, pmErrStr(sts));
+	    }
+	    continue;
+	}
+
+	refresh_net_ipv4_addr(ifr->ifr_name, netip);
+	refresh_net_hw_addr(ifr->ifr_name, netip);
+    }
+    free(ifc.ifc_buf);
+    return 0;
+}
+
+/*
+ * Parse /proc/net/if_inet6 and refresh each interface's IPv6
+ * address ("addr/prefixlen"), scope and hardware address in the
+ * cache.  A missing if_inet6 (no IPv6) is not an error.
+ */
+static int
+refresh_net_dev_ipv6_addr(pmInDom indom)
+{
+    FILE *fp;
+    char addr6p[8][5];
+    char addr6[40], devname[20+1];
+    char addr[INET6_ADDRSTRLEN];
+    char buf[MAXPATHLEN];
+    struct sockaddr_in6 sin6;
+    int sts, plen, scope, dad_status, if_idx;
+    net_addr_t *netip;
+    static uint32_t cache_err;	/* rate-limits cache error reporting */
+
+    if ((fp = linux_statsfile("/proc/net/if_inet6", buf, sizeof(buf))) == NULL)
+	return 0;
+
+    /* format: 8 hex quads, ifindex, prefixlen, scope, DAD state, name */
+    while (fscanf(fp, "%4s%4s%4s%4s%4s%4s%4s%4s %02x %02x %02x %02x %20s\n",
+		addr6p[0], addr6p[1], addr6p[2], addr6p[3],
+		addr6p[4], addr6p[5], addr6p[6], addr6p[7],
+		&if_idx, &plen, &scope, &dad_status, devname) != EOF) {
+	sts = pmdaCacheLookupName(indom, devname, NULL, (void **)&netip);
+	if (sts == PM_ERR_INST || (sts >= 0 && netip == NULL)) {
+	    /* first time since re-loaded, else new one */
+	    netip = (net_addr_t *)calloc(1, sizeof(net_addr_t));
+	}
+	else if (sts < 0) {
+	    if (cache_err++ < 10) {
+		fprintf(stderr, "refresh_net_dev_ipv6_addr: "
+			"pmdaCacheLookupName(%s, %s, ...) failed: %s\n",
+		    pmInDomStr(indom), devname, pmErrStr(sts));
+	    }
+	    continue;
+	}
+	if ((sts = pmdaCacheStore(indom, PMDA_CACHE_ADD, devname, (void *)netip)) < 0) {
+	    if (cache_err++ < 10) {
+		fprintf(stderr, "refresh_net_dev_ipv6_addr: "
+			"pmdaCacheStore(%s, PMDA_CACHE_ADD, %s, "
+			PRINTF_P_PFX "%p) failed: %s\n",
+		    pmInDomStr(indom), devname, netip, pmErrStr(sts));
+	    }
+	    continue;
+	}
+
+	/* round-trip through inet_pton/inet_ntop to canonicalise the address */
+	sprintf(addr6, "%s:%s:%s:%s:%s:%s:%s:%s",
+		addr6p[0], addr6p[1], addr6p[2], addr6p[3],
+		addr6p[4], addr6p[5], addr6p[6], addr6p[7]);
+	if (inet_pton(AF_INET6, addr6, sin6.sin6_addr.s6_addr) != 1)
+	    continue;
+
+	sin6.sin6_family = AF_INET6;
+	sin6.sin6_port = 0;
+	if (!inet_ntop(AF_INET6, &sin6.sin6_addr, addr, INET6_ADDRSTRLEN))
+	    continue;
+	snprintf(netip->ipv6, sizeof(netip->ipv6), "%s/%d", addr, plen);
+	netip->ipv6scope = (uint16_t)scope;
+	netip->has_ipv6 = 1;
+
+	refresh_net_hw_addr(devname, netip);
+    }
+    fclose(fp);
+    return 0;
+}
+
+/*
+ * This separate indom provides the addresses for all interfaces including
+ * aliases (e.g. eth0, eth0:0, eth0:1, etc) - this is what ifconfig does.
+ *
+ * Clears the has_* flags on every cached entry first, so addresses
+ * that disappeared since the last refresh are not reported as stale.
+ * Returns 0 on success, else an OR of the helpers' error codes.
+ */
+int
+refresh_net_dev_addr(pmInDom indom)
+{
+    int sts = 0;
+    net_addr_t*p;
+
+    for (pmdaCacheOp(indom, PMDA_CACHE_WALK_REWIND);;) {
+	if ((sts = pmdaCacheOp(indom, PMDA_CACHE_WALK_NEXT)) < 0)
+	    break;
+	if (!pmdaCacheLookup(indom, sts, NULL, (void **)&p) || !p)
+	    continue;
+	p->has_inet = 0;
+	p->has_ipv6 = 0;
+	p->has_hw = 0;
+    }
+
+    pmdaCacheOp(indom, PMDA_CACHE_INACTIVE);
+
+    sts |= refresh_net_dev_ipv4_addr(indom);
+    sts |= refresh_net_dev_ipv6_addr(indom);
+
+    pmdaCacheOp(indom, PMDA_CACHE_SAVE);
+    return sts;
+}
+
+/*
+ * Translate an IPv6 address scope value into the human-readable
+ * name used by the network.interface.ipv6 scope metric.
+ */
+char *
+lookup_ipv6_scope(int scope)
+{
+    if (scope == IPV6_ADDR_ANY)
+	return "Global";
+    if (scope == IPV6_ADDR_LINKLOCAL)
+	return "Link";
+    if (scope == IPV6_ADDR_SITELOCAL)
+	return "Site";
+    if (scope == IPV6_ADDR_COMPATv4)
+	return "Compat";
+    if (scope == IPV6_ADDR_LOOPBACK)
+	return "Host";
+    return "Unknown";
+}
diff --git a/src/pmdas/linux/proc_net_dev.h b/src/pmdas/linux/proc_net_dev.h
new file mode 100644
index 0000000..9bb09f3
--- /dev/null
+++ b/src/pmdas/linux/proc_net_dev.h
@@ -0,0 +1,100 @@
+/*
+ * Linux /proc/net/dev metrics cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 1995,2005 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/*
+ * Per-interface attributes obtained via ioctl(2) (and sysfs) rather
+ * than parsed from /proc/net/dev.
+ */
+typedef struct {
+    uint32_t mtu;	/* maximum transmission unit, bytes */
+    uint32_t speed;	/* line speed, Mbps */
+    uint8_t duplex;	/* duplex mode */
+    uint8_t linkup;	/* link detected */
+    uint8_t running;	/* interface up and running */
+    uint8_t pad;	/* unused, alignment only */
+} net_dev_t;
+
+/* buffer size for the printable hardware (MAC) address string */
+#define HWADDRSTRLEN 64
+
+/*
+ * One entry per interface (including aliases like eth0:1) in the
+ * network address instance domain.  The has_* flags record which of
+ * the address fields were found during the most recent refresh.
+ */
+typedef struct {
+    int has_inet : 1;	/* IPv4 address present */
+    int has_ipv6 : 1;	/* IPv6 address present */
+    int has_hw : 1;	/* hardware address present */
+    int padding : 13;	/* unused bits */
+    uint16_t ipv6scope;	/* kernel IPV6_ADDR_* scope value */
+    char inet[INET_ADDRSTRLEN];		/* printable IPv4 address */
+    char ipv6[INET6_ADDRSTRLEN+16]; /* extra for /plen */
+    char hw_addr[HWADDRSTRLEN];		/* printable MAC address */
+} net_addr_t;
+
+/* number of counter columns on each /proc/net/dev interface line */
+#define PROC_DEV_COUNTERS_PER_LINE 16
+
+/*
+ * Per-interface state for the /proc/net/dev counters, keeping the
+ * previous sample so counter wraps can be detected.
+ */
+typedef struct {
+    uint64_t last_gen;					/* generation of last refresh */
+    uint64_t last_counters[PROC_DEV_COUNTERS_PER_LINE];	/* previous sample */
+    uint64_t counters[PROC_DEV_COUNTERS_PER_LINE];	/* current sample */
+    net_dev_t ioc;					/* ioctl-sourced attributes */
+} net_interface_t;
+
+/*
+ * Fallback definitions for the ethtool/socket ioctl request codes,
+ * for build environments whose system headers do not provide them.
+ * Values match the Linux kernel's linux/ethtool.h and linux/sockios.h.
+ */
+#ifndef ETHTOOL_GSET
+#define ETHTOOL_GSET 0x1
+#endif
+
+#ifndef SIOCGIFCONF
+#define SIOCGIFCONF 0x8912
+#endif
+
+#ifndef SIOCGIFFLAGS
+#define SIOCGIFFLAGS 0x8913
+#endif
+
+#ifndef SIOCGIFADDR
+#define SIOCGIFADDR 0x8915
+#endif
+
+#ifndef SIOCGIFMTU
+#define SIOCGIFMTU 0x8921
+#endif
+
+#ifndef SIOCETHTOOL
+#define SIOCETHTOOL 0x8946
+#endif
+
+/* ioctl(SIOCETHTOOL) GSET ("get settings") structure */
+struct ethtool_cmd {
+    uint32_t cmd;
+    uint32_t supported; /* Features this interface supports */
+    uint32_t advertising; /* Features this interface advertises */
+    uint16_t speed; /* The forced speed, 10Mb, 100Mb, gigabit */
+    uint8_t duplex; /* Duplex, half or full */
+    uint8_t port; /* Which connector port */
+    uint8_t phy_address;
+    uint8_t transceiver; /* Which transceiver to use */
+    uint8_t autoneg; /* Enable or disable autonegotiation */
+    uint32_t maxtxpkt; /* Tx pkts before generating tx int */
+    uint32_t maxrxpkt; /* Rx pkts before generating rx int */
+    uint32_t reserved[4];
+};
+
+/*
+ * IPv6 address scope values, as reported in the scope field of
+ * /proc/net/if_inet6 (mirrors the kernel's include/net/ipv6.h).
+ */
+#define IPV6_ADDR_ANY 0x0000U
+#define IPV6_ADDR_UNICAST 0x0001U
+#define IPV6_ADDR_MULTICAST 0x0002U
+#define IPV6_ADDR_ANYCAST 0x0004U
+#define IPV6_ADDR_LOOPBACK 0x0010U
+#define IPV6_ADDR_LINKLOCAL 0x0020U
+#define IPV6_ADDR_SITELOCAL 0x0040U
+#define IPV6_ADDR_COMPATv4 0x0080U
+
+extern int refresh_proc_net_dev(pmInDom);
+extern int refresh_net_dev_addr(pmInDom);
+extern char *lookup_ipv6_scope(int);
diff --git a/src/pmdas/linux/proc_net_netstat.c b/src/pmdas/linux/proc_net_netstat.c
new file mode 100644
index 0000000..a7bd34a
--- /dev/null
+++ b/src/pmdas/linux/proc_net_netstat.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2014 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_net_netstat.h"
+
+/* global stats structure, filled in by refresh_proc_net_netstat() */
+extern proc_net_netstat_t _pm_proc_net_netstat;
+
+/*
+ * One table entry per recognised /proc/net/netstat column:
+ * "field" is the column heading exactly as the kernel prints it,
+ * "offset" is the address of the destination slot inside
+ * _pm_proc_net_netstat (a pointer, despite the name).
+ */
+typedef struct {
+    const char *field;
+    __uint64_t *offset;
+} netstat_fields_t;
+
+/*
+ * Mapping from IpExt column headings in /proc/net/netstat to slots in
+ * _pm_proc_net_netstat.ip[].  Field names must match the kernel's
+ * headings exactly - get_fields() compares them with strcmp().
+ * The table is terminated by a NULL .field sentinel, which both
+ * get_fields() and init_refresh_proc_net_netstat() iterate to.
+ */
+netstat_fields_t netstat_ip_fields[] = {
+    { .field = "InNoRoutes",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INNOROUTES] },
+    { .field = "InTruncatedPkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INTRUNCATEDPKTS] },
+    { .field = "InMcastPkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INMCASTPKTS] },
+    { .field = "OutMcastPkts",	/* no trailing space - must match heading */
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTMCASTPKTS] },
+    { .field = "InBcastPkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INBCASTPKTS] },
+    { .field = "OutBcastPkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTBCASTPKTS] },
+    { .field = "InOctets",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INOCTETS] },
+    { .field = "OutOctets",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTOCTETS] },
+    { .field = "InMcastOctets",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INMCASTOCTETS] },
+    { .field = "OutMcastOctets",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTMCASTOCTETS] },
+    { .field = "InBcastOctets",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_INBCASTOCTETS] },
+    { .field = "OutBcastOctets",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_OUTBCASTOCTETS] },
+    { .field = "InCsumErrors",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_CSUMERRORS] },
+    { .field = "InNoECTPkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_NOECTPKTS] },
+    { .field = "InECT1Pkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_ECT1PKTS] },
+    { .field = "InECT0Pkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_ECT0PKTS] },
+    { .field = "InCEPkts",
+     .offset = &_pm_proc_net_netstat.ip[_PM_NETSTAT_IPEXT_CEPKTS] },
+    { .field = NULL, .offset = NULL }	/* sentinel - must be last */
+};
+
+
+/*
+ * Mapping from TcpExt column headings in /proc/net/netstat to slots in
+ * _pm_proc_net_netstat.tcp[].  Field names must match the kernel's
+ * headings exactly - get_fields() compares them with strcmp().
+ * The table is terminated by a NULL .field sentinel, which both
+ * get_fields() and init_refresh_proc_net_netstat() iterate to.
+ */
+netstat_fields_t netstat_tcp_fields[] = {
+    { .field = "SyncookiesSent",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SYNCOOKIESSENT] },
+    { .field = "SyncookiesRecv",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SYNCOOKIESRECV] },
+    { .field = "SyncookiesFailed",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SYNCOOKIESFAILED] },
+    { .field = "EmbryonicRsts",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_EMBRYONICRSTS] },
+    { .field = "PruneCalled",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PRUNECALLED] },
+    { .field = "RcvPruned",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_RCVPRUNED] },
+    { .field = "OfoPruned",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_OFOPRUNED] },
+    { .field = "OutOfWindowIcmps",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_OUTOFWINDOWICMPS] },
+    { .field = "LockDroppedIcmps",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_LOCKDROPPEDICMPS] },
+    { .field = "ArpFilter",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_ARPFILTER] },
+    { .field = "TW",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TIMEWAITED] },
+    { .field = "TWRecycled",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TIMEWAITRECYCLED] },
+    { .field = "TWKilled",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TIMEWAITKILLED] },
+    { .field = "PAWSPassive",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PAWSPASSIVEREJECTED] },
+    { .field = "PAWSActive",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PAWSACTIVEREJECTED] },
+    { .field = "PAWSEstab",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_PAWSESTABREJECTED] },
+    { .field = "DelayedACKs",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_DELAYEDACKS] },
+    { .field = "DelayedACKLocked",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_DELAYEDACKLOCKED] },
+    { .field = "DelayedACKLost",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_DELAYEDACKLOST] },
+    { .field = "ListenOverflows",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_LISTENOVERFLOWS] },
+    { .field = "ListenDrops",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_LISTENDROPS] },
+    { .field = "TCPPrequeued",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPREQUEUED] },
+    { .field = "TCPDirectCopyFromBacklog",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDIRECTCOPYFROMBACKLOG] },
+    { .field = "TCPDirectCopyFromPrequeue",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDIRECTCOPYFROMPREQUEUE] },
+    { .field = "TCPPrequeueDropped",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPREQUEUEDROPPED] },
+    { .field = "TCPHPHits",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPHPHITS] },
+    { .field = "TCPHPHitsToUser",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPHPHITSTOUSER] },
+    { .field = "TCPPureAcks",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPUREACKS] },
+    { .field = "TCPHPAcks",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPHPACKS] },
+    { .field = "TCPRenoRecovery",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENORECOVERY] },
+    { .field = "TCPSackRecovery",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKRECOVERY] },
+    { .field = "TCPSACKReneging",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKRENEGING] },
+    { .field = "TCPFACKReorder",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFACKREORDER] },
+    { .field = "TCPSACKReorder",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKREORDER] },
+    { .field = "TCPRenoReorder",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENOREORDER] },
+    { .field = "TCPTSReorder",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTSREORDER] },
+    { .field = "TCPFullUndo",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFULLUNDO] },
+    { .field = "TCPPartialUndo",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPPARTIALUNDO] },
+    { .field = "TCPDSACKUndo",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKUNDO] },
+    { .field = "TCPLossUndo",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSUNDO] },
+    { .field = "TCPLostRetransmit",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSTRETRANSMIT] },
+    { .field = "TCPRenoFailures",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENOFAILURES] },
+    { .field = "TCPSackFailures",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKFAILURES] },
+    { .field = "TCPLossFailures",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSFAILURES] },
+    { .field = "TCPFastRetrans",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTRETRANS] },
+    { .field = "TCPForwardRetrans",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFORWARDRETRANS] },
+    { .field = "TCPSlowStartRetrans",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSLOWSTARTRETRANS] },
+    { .field = "TCPTimeouts",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTIMEOUTS] },
+    { .field = "TCPLossProbes",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSPROBES] },
+    { .field = "TCPLossProbeRecovery",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPLOSSPROBERECOVERY] },
+    { .field = "TCPRenoRecoveryFail",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRENORECOVERYFAIL] },
+    { .field = "TCPSackRecoveryFail",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKRECOVERYFAIL] },
+    { .field = "TCPSchedulerFailed",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSCHEDULERFAILED] },
+    { .field = "TCPRcvCollapsed",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRCVCOLLAPSED] },
+    { .field = "TCPDSACKOldSent",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKOLDSENT] },
+    { .field = "TCPDSACKOfoSent",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKOFOSENT] },
+    { .field = "TCPDSACKRecv",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKRECV] },
+    { .field = "TCPDSACKOfoRecv",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKOFORECV] },
+    { .field = "TCPAbortOnData",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONDATA] },
+    { .field = "TCPAbortOnClose",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONCLOSE] },
+    { .field = "TCPAbortOnMemory",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONMEMORY] },
+    { .field = "TCPAbortOnTimeout",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONTIMEOUT] },
+    { .field = "TCPAbortOnLinger",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTONLINGER] },
+    { .field = "TCPAbortFailed",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPABORTFAILED] },
+    { .field = "TCPMemoryPressures",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMEMORYPRESSURES] },
+    { .field = "TCPSACKDiscard",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSACKDISCARD] },
+    { .field = "TCPDSACKIgnoredOld",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKIGNOREDOLD] },
+    { .field = "TCPDSACKIgnoredNoUndo",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDSACKIGNOREDNOUNDO] },
+    { .field = "TCPSpuriousRTOs",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSPURIOUSRTOS] },
+    { .field = "TCPMD5NotFound",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMD5NOTFOUND] },
+    { .field = "TCPMD5Unexpected",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMD5UNEXPECTED] },
+    { .field = "TCPSackShifted",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SACKSHIFTED] },
+    { .field = "TCPSackMerged",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SACKMERGED] },
+    { .field = "TCPSackShiftFallback",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_SACKSHIFTFALLBACK] },
+    { .field = "TCPBacklogDrop",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPBACKLOGDROP] },
+    { .field = "TCPMinTTLDrop",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPMINTTLDROP] },
+    { .field = "TCPDeferAcceptDrop",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPDEFERACCEPTDROP] },
+    { .field = "IPReversePathFilter",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_IPRPFILTER] },
+    { .field = "TCPTimeWaitOverflow",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTIMEWAITOVERFLOW] },
+    { .field = "TCPReqQFullDoCookies",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPREQQFULLDOCOOKIES] },
+    { .field = "TCPReqQFullDrop",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPREQQFULLDROP] },
+    { .field = "TCPRetransFail",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRETRANSFAIL] },
+    { .field = "TCPRcvCoalesce",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPRCVCOALESCE] },
+    { .field = "TCPOFOQueue",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPOFOQUEUE] },
+    { .field = "TCPOFODrop",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPOFODROP] },
+    { .field = "TCPOFOMerge",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPOFOMERGE] },
+    { .field = "TCPChallengeACK",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPCHALLENGEACK] },
+    { .field = "TCPSYNChallenge",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSYNCHALLENGE] },
+    { .field = "TCPFastOpenActive",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENACTIVE] },
+    { .field = "TCPFastOpenActiveFail",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENACTIVEFAIL] },
+    { .field = "TCPFastOpenPassive",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENPASSIVE] },
+    { .field = "TCPFastOpenPassiveFail",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENPASSIVEFAIL] },
+    { .field = "TCPFastOpenListenOverflow",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENLISTENOVERFLOW] },
+    { .field = "TCPFastOpenCookieReqd",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFASTOPENCOOKIEREQD] },
+    { .field = "TCPSpuriousRtxHostQueues",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSPURIOUS_RTX_HOSTQUEUES] },
+    { .field = "BusyPollRxPackets",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_BUSYPOLLRXPACKETS] },
+    { .field = "TCPAutoCorking",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPAUTOCORKING] },
+    { .field = "TCPFromZeroWindowAdv",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPFROMZEROWINDOWADV] },
+    { .field = "TCPToZeroWindowAdv",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPTOZEROWINDOWADV] },
+    { .field = "TCPWantZeroWindowAdv",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPWANTZEROWINDOWADV] },
+    { .field = "TCPSynRetrans",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPSYNRETRANS] },
+    { .field = "TCPOrigDataSent",
+     .offset = &_pm_proc_net_netstat.tcp[_PM_NETSTAT_TCPEXT_TCPORIGDATASENT] },
+    { .field = NULL, .offset = NULL }	/* sentinel - must be last */
+};
+
+/*
+ * Parse one header/value line pair from /proc/net/netstat.
+ *
+ * "header" holds the line of column names, "buffer" the matching line
+ * of values; both begin with the same "XxxExt:" prefix which is
+ * discarded by the initial strtok() calls.  For each column whose
+ * heading matches a .field entry in "fields", the column's value is
+ * stored through the entry's .offset pointer.
+ *
+ * NOTE(review): both loops below terminate on a NULL .field entry -
+ * the fields tables must carry a NULL sentinel as their last element.
+ *
+ * Both input strings are modified in place by strtok().
+ */
+static void
+get_fields(netstat_fields_t *fields, char *header, char *buffer)
+{
+    int i, j, count;
+    char *p, *indices[NETSTAT_MAX_COLUMNS];
+
+    /* first get pointers to each of the column headings */
+    strtok(header, " ");
+    for (i = 0; i < NETSTAT_MAX_COLUMNS; i++) {
+	if ((p = strtok(NULL, " \n")) == NULL)
+	    break;
+	indices[i] = p;
+    }
+    count = i;
+
+    /*
+     * Extract values via back-referencing column headings.
+     * "i" is the last found index, which we use for a bit
+     * of optimisation for the (common) in-order maps case
+     * (where "in order" means in the order defined by the
+     * passed in "fields" table which typically matches the
+     * kernel - but may be out-of-order for older kernels).
+     */
+    strtok(buffer, " ");
+    for (i = j = 0; j < count && fields[i].field; j++, i++) {
+	if ((p = strtok(NULL, " \n")) == NULL)
+	    break;
+	/* fast path: this column is the next table entry, in order */
+	if (strcmp(fields[i].field, indices[j]) == 0)
+	    *fields[i].offset = strtoull(p, NULL, 10);
+	else {
+	    /* slow path: search the whole table for this heading */
+	    for (i = 0; fields[i].field; i++) {
+		if (strcmp(fields[i].field, indices[j]) != 0)
+		    continue;
+		*fields[i].offset = strtoull(p, NULL, 10);
+		break;
+	    }
+	    /* reset so the outer loop does not walk off the table end */
+	    if (fields[i].field == NULL) /* not found, ignore */
+		i = 0;
+	}
+    }
+}
+
+
+/*
+ * Translate a fields-table entry index into a pointer within an
+ * arbitrary base ("pp"): the entry's .offset pointer minus the start
+ * of the corresponding global array gives a byte offset, re-applied
+ * to "pp".  Cast to int64_t* so -1 can be stored as the "no value
+ * available" marker in the (unsigned) counter slots.
+ */
+#define NETSTAT_IP_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+    (__psint_t)netstat_ip_fields[ii].offset - (__psint_t)&_pm_proc_net_netstat.ip)
+#define NETSTAT_TCP_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+    (__psint_t)netstat_tcp_fields[ii].offset - (__psint_t)&_pm_proc_net_netstat.tcp)
+
+/*
+ * Reset every known counter slot in "netstat" to -1 before a refresh,
+ * so columns absent from this kernel's /proc/net/netstat report as
+ * "no value available" rather than stale or zero values.
+ */
+static void
+init_refresh_proc_net_netstat(proc_net_netstat_t *netstat)
+{
+    int i;
+
+    /* initially, all marked as "no value available" */
+    for (i = 0; netstat_ip_fields[i].field != NULL; i++)
+	*(NETSTAT_IP_OFFSET(i, netstat->ip)) = -1;
+    for (i = 0; netstat_tcp_fields[i].field != NULL; i++)
+	*(NETSTAT_TCP_OFFSET(i, netstat->tcp)) = -1;
+}
+
+/*
+ * Read /proc/net/netstat and fill "netstat" with the value of every
+ * recognised IpExt and TcpExt column (unrecognised columns are
+ * ignored; missing columns remain -1, "no value available").
+ *
+ * The file consists of line pairs: a header line of column names
+ * followed by a value line, both starting with the same "XxxExt:"
+ * prefix.  Returns 0 on success, negative error code if the stats
+ * file cannot be opened.
+ */
+int
+refresh_proc_net_netstat(proc_net_netstat_t *netstat)
+{
+    /*
+     * Line buffers must be large enough for complete lines: the
+     * TcpExt header alone exceeds 3KB on recent kernels (see the
+     * NETSTAT_MAX_COLUMNS note), and a 2KB buffer would silently
+     * truncate it, desynchronising the header/value line pairing.
+     */
+    char buf[MAXPATHLEN];	/* pathname buffer for linux_statsfile() */
+    char header[8192];
+    char vals[8192];
+    FILE *fp;
+
+    init_refresh_proc_net_netstat(netstat);
+    if ((fp = linux_statsfile("/proc/net/netstat", buf, sizeof(buf))) == NULL)
+	return -oserror();
+    while (fgets(header, sizeof(header), fp) != NULL) {
+	if (fgets(vals, sizeof(vals), fp) != NULL) {
+	    if (strncmp(vals, "IpExt:", 6) == 0)
+		get_fields(netstat_ip_fields, header, vals);
+	    else if (strncmp(vals, "TcpExt:", 7) == 0)
+		get_fields(netstat_tcp_fields, header, vals);
+	    else
+		__pmNotifyErr(LOG_ERR, "Unrecognised netstat row: %s\n", vals);
+	}
+    }
+    fclose(fp);
+    return 0;
+}
diff --git a/src/pmdas/linux/proc_net_netstat.h b/src/pmdas/linux/proc_net_netstat.h
new file mode 100644
index 0000000..4a5a9c8
--- /dev/null
+++ b/src/pmdas/linux/proc_net_netstat.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2014 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define NETSTAT_MAX_COLUMNS 256 /* arbitrary upper bound (228 columns observed as of 22/04/2014) */
+
+/*
+ * Indices into proc_net_netstat_t.ip[], one per IpExt column of
+ * /proc/net/netstat (kernel LINUX_MIB_* IP extended statistics).
+ */
+enum {
+    _PM_NETSTAT_IPEXT_INNOROUTES = 0,
+    _PM_NETSTAT_IPEXT_INTRUNCATEDPKTS,
+    _PM_NETSTAT_IPEXT_INMCASTPKTS,
+    _PM_NETSTAT_IPEXT_OUTMCASTPKTS,
+    _PM_NETSTAT_IPEXT_INBCASTPKTS,
+    _PM_NETSTAT_IPEXT_OUTBCASTPKTS,
+    _PM_NETSTAT_IPEXT_INOCTETS,
+    _PM_NETSTAT_IPEXT_OUTOCTETS,
+    _PM_NETSTAT_IPEXT_INMCASTOCTETS,
+    _PM_NETSTAT_IPEXT_OUTMCASTOCTETS,
+    _PM_NETSTAT_IPEXT_INBCASTOCTETS,
+    _PM_NETSTAT_IPEXT_OUTBCASTOCTETS,
+    _PM_NETSTAT_IPEXT_CSUMERRORS,
+    _PM_NETSTAT_IPEXT_NOECTPKTS,
+    _PM_NETSTAT_IPEXT_ECT1PKTS,
+    _PM_NETSTAT_IPEXT_ECT0PKTS,
+    _PM_NETSTAT_IPEXT_CEPKTS,
+    _PM_NETSTAT_IPEXT_NFIELDS /* must be last */
+};
+
+/*
+ * Indices into proc_net_netstat_t.tcp[], one per TcpExt column of
+ * /proc/net/netstat (kernel LINUX_MIB_* TCP extended statistics).
+ */
+enum {
+    _PM_NETSTAT_TCPEXT_SYNCOOKIESSENT = 0,
+    _PM_NETSTAT_TCPEXT_SYNCOOKIESRECV,
+    _PM_NETSTAT_TCPEXT_SYNCOOKIESFAILED,
+    _PM_NETSTAT_TCPEXT_EMBRYONICRSTS,
+    _PM_NETSTAT_TCPEXT_PRUNECALLED,
+    _PM_NETSTAT_TCPEXT_RCVPRUNED,
+    _PM_NETSTAT_TCPEXT_OFOPRUNED,
+    _PM_NETSTAT_TCPEXT_OUTOFWINDOWICMPS,
+    _PM_NETSTAT_TCPEXT_LOCKDROPPEDICMPS,
+    _PM_NETSTAT_TCPEXT_ARPFILTER,
+    _PM_NETSTAT_TCPEXT_TIMEWAITED,
+    _PM_NETSTAT_TCPEXT_TIMEWAITRECYCLED,
+    _PM_NETSTAT_TCPEXT_TIMEWAITKILLED,
+    _PM_NETSTAT_TCPEXT_PAWSPASSIVEREJECTED,
+    _PM_NETSTAT_TCPEXT_PAWSACTIVEREJECTED,
+    _PM_NETSTAT_TCPEXT_PAWSESTABREJECTED,
+    _PM_NETSTAT_TCPEXT_DELAYEDACKS,
+    _PM_NETSTAT_TCPEXT_DELAYEDACKLOCKED,
+    _PM_NETSTAT_TCPEXT_DELAYEDACKLOST,
+    _PM_NETSTAT_TCPEXT_LISTENOVERFLOWS,
+    _PM_NETSTAT_TCPEXT_LISTENDROPS,
+    _PM_NETSTAT_TCPEXT_TCPPREQUEUED,
+    _PM_NETSTAT_TCPEXT_TCPDIRECTCOPYFROMBACKLOG,
+    _PM_NETSTAT_TCPEXT_TCPDIRECTCOPYFROMPREQUEUE,
+    _PM_NETSTAT_TCPEXT_TCPPREQUEUEDROPPED,
+    _PM_NETSTAT_TCPEXT_TCPHPHITS,
+    _PM_NETSTAT_TCPEXT_TCPHPHITSTOUSER,
+    _PM_NETSTAT_TCPEXT_TCPPUREACKS,
+    _PM_NETSTAT_TCPEXT_TCPHPACKS,
+    _PM_NETSTAT_TCPEXT_TCPRENORECOVERY,
+    _PM_NETSTAT_TCPEXT_TCPSACKRECOVERY,
+    _PM_NETSTAT_TCPEXT_TCPSACKRENEGING,
+    _PM_NETSTAT_TCPEXT_TCPFACKREORDER,
+    _PM_NETSTAT_TCPEXT_TCPSACKREORDER,
+    _PM_NETSTAT_TCPEXT_TCPRENOREORDER,
+    _PM_NETSTAT_TCPEXT_TCPTSREORDER,
+    _PM_NETSTAT_TCPEXT_TCPFULLUNDO,
+    _PM_NETSTAT_TCPEXT_TCPPARTIALUNDO,
+    _PM_NETSTAT_TCPEXT_TCPDSACKUNDO,
+    _PM_NETSTAT_TCPEXT_TCPLOSSUNDO,
+    _PM_NETSTAT_TCPEXT_TCPLOSTRETRANSMIT,
+    _PM_NETSTAT_TCPEXT_TCPRENOFAILURES,
+    _PM_NETSTAT_TCPEXT_TCPSACKFAILURES,
+    _PM_NETSTAT_TCPEXT_TCPLOSSFAILURES,
+    _PM_NETSTAT_TCPEXT_TCPFASTRETRANS,
+    _PM_NETSTAT_TCPEXT_TCPFORWARDRETRANS,
+    _PM_NETSTAT_TCPEXT_TCPSLOWSTARTRETRANS,
+    _PM_NETSTAT_TCPEXT_TCPTIMEOUTS,
+    _PM_NETSTAT_TCPEXT_TCPLOSSPROBES,
+    _PM_NETSTAT_TCPEXT_TCPLOSSPROBERECOVERY,
+    _PM_NETSTAT_TCPEXT_TCPRENORECOVERYFAIL,
+    _PM_NETSTAT_TCPEXT_TCPSACKRECOVERYFAIL,
+    _PM_NETSTAT_TCPEXT_TCPSCHEDULERFAILED,
+    _PM_NETSTAT_TCPEXT_TCPRCVCOLLAPSED,
+    _PM_NETSTAT_TCPEXT_TCPDSACKOLDSENT,
+    _PM_NETSTAT_TCPEXT_TCPDSACKOFOSENT,
+    _PM_NETSTAT_TCPEXT_TCPDSACKRECV,
+    _PM_NETSTAT_TCPEXT_TCPDSACKOFORECV,
+    _PM_NETSTAT_TCPEXT_TCPABORTONDATA,
+    _PM_NETSTAT_TCPEXT_TCPABORTONCLOSE,
+    _PM_NETSTAT_TCPEXT_TCPABORTONMEMORY,
+    _PM_NETSTAT_TCPEXT_TCPABORTONTIMEOUT,
+    _PM_NETSTAT_TCPEXT_TCPABORTONLINGER,
+    _PM_NETSTAT_TCPEXT_TCPABORTFAILED,
+    _PM_NETSTAT_TCPEXT_TCPMEMORYPRESSURES,
+    _PM_NETSTAT_TCPEXT_TCPSACKDISCARD,
+    _PM_NETSTAT_TCPEXT_TCPDSACKIGNOREDOLD,
+    _PM_NETSTAT_TCPEXT_TCPDSACKIGNOREDNOUNDO,
+    _PM_NETSTAT_TCPEXT_TCPSPURIOUSRTOS,
+    _PM_NETSTAT_TCPEXT_TCPMD5NOTFOUND,
+    _PM_NETSTAT_TCPEXT_TCPMD5UNEXPECTED,
+    _PM_NETSTAT_TCPEXT_SACKSHIFTED,
+    _PM_NETSTAT_TCPEXT_SACKMERGED,
+    _PM_NETSTAT_TCPEXT_SACKSHIFTFALLBACK,
+    _PM_NETSTAT_TCPEXT_TCPBACKLOGDROP,
+    _PM_NETSTAT_TCPEXT_TCPMINTTLDROP,
+    _PM_NETSTAT_TCPEXT_TCPDEFERACCEPTDROP,
+    _PM_NETSTAT_TCPEXT_IPRPFILTER,
+    _PM_NETSTAT_TCPEXT_TCPTIMEWAITOVERFLOW,
+    _PM_NETSTAT_TCPEXT_TCPREQQFULLDOCOOKIES,
+    _PM_NETSTAT_TCPEXT_TCPREQQFULLDROP,
+    _PM_NETSTAT_TCPEXT_TCPRETRANSFAIL,
+    _PM_NETSTAT_TCPEXT_TCPRCVCOALESCE,
+    _PM_NETSTAT_TCPEXT_TCPOFOQUEUE,
+    _PM_NETSTAT_TCPEXT_TCPOFODROP,
+    _PM_NETSTAT_TCPEXT_TCPOFOMERGE,
+    _PM_NETSTAT_TCPEXT_TCPCHALLENGEACK,
+    _PM_NETSTAT_TCPEXT_TCPSYNCHALLENGE,
+    _PM_NETSTAT_TCPEXT_TCPFASTOPENACTIVE,
+    _PM_NETSTAT_TCPEXT_TCPFASTOPENACTIVEFAIL,
+    _PM_NETSTAT_TCPEXT_TCPFASTOPENPASSIVE,
+    _PM_NETSTAT_TCPEXT_TCPFASTOPENPASSIVEFAIL,
+    _PM_NETSTAT_TCPEXT_TCPFASTOPENLISTENOVERFLOW,
+    _PM_NETSTAT_TCPEXT_TCPFASTOPENCOOKIEREQD,
+    _PM_NETSTAT_TCPEXT_TCPSPURIOUS_RTX_HOSTQUEUES,
+    _PM_NETSTAT_TCPEXT_BUSYPOLLRXPACKETS,
+    _PM_NETSTAT_TCPEXT_TCPAUTOCORKING,
+    _PM_NETSTAT_TCPEXT_TCPFROMZEROWINDOWADV,
+    _PM_NETSTAT_TCPEXT_TCPTOZEROWINDOWADV,
+    _PM_NETSTAT_TCPEXT_TCPWANTZEROWINDOWADV,
+    _PM_NETSTAT_TCPEXT_TCPSYNRETRANS,
+    _PM_NETSTAT_TCPEXT_TCPORIGDATASENT,
+    _PM_NETSTAT_TCPEXT_NFIELDS /* must be last */
+};
+
+
+/*
+ * All values harvested from /proc/net/netstat on the last refresh;
+ * slots for columns this kernel does not report hold (__uint64_t)-1,
+ * meaning "no value available".
+ */
+typedef struct {
+    __uint64_t ip[_PM_NETSTAT_IPEXT_NFIELDS];	/* IpExt row */
+    __uint64_t tcp[_PM_NETSTAT_TCPEXT_NFIELDS];	/* TcpExt row */
+} proc_net_netstat_t;
+
+extern int refresh_proc_net_netstat(proc_net_netstat_t *);
diff --git a/src/pmdas/linux/proc_net_rpc.c b/src/pmdas/linux/proc_net_rpc.c
new file mode 100644
index 0000000..1b1a940
--- /dev/null
+++ b/src/pmdas/linux/proc_net_rpc.c
@@ -0,0 +1,188 @@
+/*
+ * Linux /proc/net/rpc metrics cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_net_rpc.h"
+
+/*
+ * Harvest NFS/RPC statistics from /proc/net/rpc/nfs (client side)
+ * and /proc/net/rpc/nfsd (server side) into "proc_net_rpc".
+ *
+ * Each file consists of tagged lines ("net", "rpc", "proc2", ...);
+ * unrecognised tags are silently ignored.  Per-side errcode records
+ * any open failure so the caller can distinguish "no client" from
+ * "no server" support.  Returns 0 if both files were read, -1 if
+ * either could not be opened.
+ */
+int
+refresh_proc_net_rpc(proc_net_rpc_t *proc_net_rpc)
+{
+    char buf[4096];	/* pathname, then line buffer */
+    FILE *fp;
+    char *p;
+    int i;
+
+    /* unparsed counters report as zero */
+    memset(proc_net_rpc, 0, sizeof(proc_net_rpc_t));
+
+    /*
+     * client stats
+     */
+    if ((fp = linux_statsfile("/proc/net/rpc/nfs", buf, sizeof(buf))) == NULL) {
+	proc_net_rpc->client.errcode = -oserror();
+    }
+    else {
+	proc_net_rpc->client.errcode = 0;
+	while (fgets(buf, sizeof(buf), fp) != NULL) {
+	    if (strncmp(buf, "net", 3) == 0)
+		sscanf(buf, "net %u %u %u %u",
+			&proc_net_rpc->client.netcnt,
+			&proc_net_rpc->client.netudpcnt,
+			&proc_net_rpc->client.nettcpcnt,
+			&proc_net_rpc->client.nettcpconn);
+	    else
+	    if (strncmp(buf, "rpc", 3) == 0)
+		sscanf(buf, "rpc %u %u %u",
+			&proc_net_rpc->client.rpccnt,
+			&proc_net_rpc->client.rpcretrans,
+			&proc_net_rpc->client.rpcauthrefresh);
+	    else
+	    /* "procN" lines: tag, then a count of fields, then the
+	     * per-procedure counters - the second strtok() skips the
+	     * field-count token before the counters are collected. */
+	    if (strncmp(buf, "proc2", 5) == 0) {
+		if ((p = strtok(buf, " ")) != NULL)
+		    p = strtok(NULL, " ");
+		for (i=0; p && i < NR_RPC_COUNTERS; i++) {
+		    if ((p = strtok(NULL, " ")) == NULL)
+			break;
+		    proc_net_rpc->client.reqcounts[i] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	    else
+	    if (strncmp(buf, "proc3", 5) == 0) {
+		if ((p = strtok(buf, " ")) != NULL)
+		    p = strtok(NULL, " ");
+		for (i=0; p && i < NR_RPC3_COUNTERS; i++) {
+		    if ((p = strtok(NULL, " ")) == NULL)
+			break;
+		    proc_net_rpc->client.reqcounts3[i] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	    else
+	    if (strncmp(buf, "proc4", 5) == 0) {
+		if ((p = strtok(buf, " ")) != NULL)
+		    p = strtok(NULL, " ");
+		for (i=0; p && i < NR_RPC4_CLI_COUNTERS; i++) {
+		    if ((p = strtok(NULL, " ")) == NULL)
+			break;
+		    proc_net_rpc->client.reqcounts4[i] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	}
+
+	fclose(fp);
+    }
+
+    /*
+     * server stats
+     */
+    if ((fp = linux_statsfile("/proc/net/rpc/nfsd", buf, sizeof(buf))) == NULL) {
+	proc_net_rpc->server.errcode = -oserror();
+    }
+    else {
+	proc_net_rpc->server.errcode = 0;
+	while (fgets(buf, sizeof(buf), fp) != NULL) {
+	    /* old combined reply-cache + filehandle line; on kernels
+	     * with a separate "fh" line only the first values match */
+	    if (strncmp(buf, "rc", 2) == 0)
+		sscanf(buf, "rc %u %u %u %u %u %u %u %u %u",
+			&proc_net_rpc->server.rchits,
+			&proc_net_rpc->server.rcmisses,
+			&proc_net_rpc->server.rcnocache,
+			&proc_net_rpc->server.fh_cached,
+			&proc_net_rpc->server.fh_valid,
+			&proc_net_rpc->server.fh_fixup,
+			&proc_net_rpc->server.fh_lookup,
+			&proc_net_rpc->server.fh_stale,
+			&proc_net_rpc->server.fh_concurrent);
+	    else
+	    if (strncmp(buf, "fh", 2) == 0)
+		sscanf(buf, "fh %u %u %u %u %u",
+			&proc_net_rpc->server.fh_stale,
+			&proc_net_rpc->server.fh_lookup,
+			&proc_net_rpc->server.fh_anon,
+			&proc_net_rpc->server.fh_nocache_dir,
+			&proc_net_rpc->server.fh_nocache_nondir);
+	    else
+	    if (strncmp(buf, "io", 2) == 0)
+		sscanf(buf, "io %u %u",
+			&proc_net_rpc->server.io_read,
+			&proc_net_rpc->server.io_write);
+	    else
+	    if (strncmp(buf, "th", 2) == 0)
+		sscanf(buf, "th %u %u",
+			&proc_net_rpc->server.th_cnt,
+			&proc_net_rpc->server.th_fullcnt);
+	    else
+	    if (strncmp(buf, "net", 3) == 0)
+		sscanf(buf, "net %u %u %u %u",
+			&proc_net_rpc->server.netcnt,
+			&proc_net_rpc->server.netudpcnt,
+			&proc_net_rpc->server.nettcpcnt,
+			&proc_net_rpc->server.nettcpconn);
+	    else
+	    if (strncmp(buf, "rpc", 3) == 0)
+		sscanf(buf, "rpc %u %u %u",
+			&proc_net_rpc->server.rpccnt,
+			&proc_net_rpc->server.rpcerr, /* always the sum of the following three fields */
+			&proc_net_rpc->server.rpcbadfmt);
+	    else
+	    if (strncmp(buf, "proc2", 5) == 0) {
+		if ((p = strtok(buf, " ")) != NULL)
+		    p = strtok(NULL, " ");
+		for (i=0; p && i < NR_RPC_COUNTERS; i++) {
+		    if ((p = strtok(NULL, " ")) == NULL)
+			break;
+		    proc_net_rpc->server.reqcounts[i] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	    else
+	    if (strncmp(buf, "proc3", 5) == 0) {
+		if ((p = strtok(buf, " ")) != NULL)
+		    p = strtok(NULL, " ");
+		for (i=0; p && i < NR_RPC3_COUNTERS; i++) {
+		    if ((p = strtok(NULL, " ")) == NULL)
+			break;
+		    proc_net_rpc->server.reqcounts3[i] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	    else
+	    /* NB: "proc4ops" must be tested before its prefix "proc4" */
+	    if (strncmp(buf, "proc4ops", 8) == 0) {
+		if ((p = strtok(buf, " ")) != NULL)
+		    p = strtok(NULL, " ");
+
+		/* Inst 0 is NULL count (below) */
+		for (i=1; p && i < NR_RPC4_SVR_COUNTERS; i++) {
+		    if ((p = strtok(NULL, " ")) == NULL)
+			break;
+		    proc_net_rpc->server.reqcounts4[i] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	    else
+	    if (strncmp(buf, "proc4", 5) == 0) {
+		if ((strtok(buf, " ")) != NULL &&
+		    (strtok(NULL, " ")) != NULL &&
+		    (p = strtok(NULL, " ")) != NULL) { /* 3rd token is NULL count */
+		    proc_net_rpc->server.reqcounts4[0] = strtoul(p, (char **)NULL, 10);
+		}
+	    }
+	}
+
+	fclose(fp);
+    }
+
+    if (proc_net_rpc->client.errcode == 0 && proc_net_rpc->server.errcode == 0)
+	return 0;
+    return -1;
+}
diff --git a/src/pmdas/linux/proc_net_rpc.h b/src/pmdas/linux/proc_net_rpc.h
new file mode 100644
index 0000000..880dbed
--- /dev/null
+++ b/src/pmdas/linux/proc_net_rpc.h
@@ -0,0 +1,99 @@
+/*
+ * Linux /proc/net/rpc metrics cluster
+ *
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define NR_RPC_COUNTERS 18
+#define NR_RPC3_COUNTERS 22
+#define NR_RPC4_CLI_COUNTERS 35
+#define NR_RPC4_SVR_COUNTERS 41
+
+/*
+ * In-memory image of the kernel RPC statistics files:
+ * "client" mirrors /proc/net/rpc/nfs, "server" mirrors /proc/net/rpc/nfsd.
+ * Each sub-struct carries its own errcode so client/server refresh
+ * failures are reported independently.
+ */
+typedef struct {
+ struct {
+ int errcode; /* error from last refresh */
+ /* /proc/net/rpc/nfs "net" */
+ unsigned int netcnt;
+ unsigned int netudpcnt;
+ unsigned int nettcpcnt;
+ unsigned int nettcpconn;
+
+ /* /proc/net/rpc/nfs "rpc" */
+ unsigned int rpccnt;
+ unsigned int rpcretrans;
+ unsigned int rpcauthrefresh;
+
+ /* /proc/net/rpc/nfs "proc2" */
+ unsigned int reqcounts[NR_RPC_COUNTERS];
+
+ /* /proc/net/rpc/nfs "proc3" */
+ unsigned int reqcounts3[NR_RPC3_COUNTERS];
+
+ /* /proc/net/rpc/nfs "proc4" */
+ unsigned int reqcounts4[NR_RPC4_CLI_COUNTERS];
+ } client;
+
+ struct {
+ int errcode; /* error from last refresh */
+ /* /proc/net/rpc/nfsd "rc" and "fh" */
+ unsigned int rchits; /* repcache hits */
+ unsigned int rcmisses; /* repcache misses */
+ unsigned int rcnocache; /* uncached reqs */
+ unsigned int fh_cached; /* dentry cached */
+ unsigned int fh_valid; /* dentry validated */
+ unsigned int fh_fixup; /* dentry fixup validated */
+ unsigned int fh_lookup; /* new lookup required */
+ unsigned int fh_stale; /* FH stale error */
+ unsigned int fh_concurrent; /* concurrent request */
+ unsigned int fh_anon; /* anon file dentry returned */
+ unsigned int fh_nocache_dir; /* dir filehandle not found in dcache */
+ unsigned int fh_nocache_nondir; /* nondir filehandle not in dcache */
+
+ /* /proc/net/rpc/nfsd "io" */
+ unsigned int io_read; /* bytes returned to read requests */
+ unsigned int io_write; /* bytes passed in write requests */
+
+ /* /proc/net/rpc/nfsd "th" */
+ unsigned int th_cnt; /* available nfsd threads */
+ unsigned int th_fullcnt; /* times last free thread used */
+
+ /* /proc/net/rpc/nfsd "net" */
+ unsigned int netcnt;
+ unsigned int netudpcnt;
+ unsigned int nettcpcnt;
+ unsigned int nettcpconn;
+
+ /* /proc/net/rpc/nfsd "rpc" */
+ unsigned int rpccnt;
+ unsigned int rpcerr; /* bad calls: sum of the bad* fields below */
+ unsigned int rpcbadfmt;
+ unsigned int rpcbadauth; /* NOTE(review): badauth/badclnt appear unset */
+ unsigned int rpcbadclnt; /* by the "rpc" parser in proc_net_rpc.c - confirm */
+
+ /* /proc/net/rpc/nfsd "proc2" */
+ unsigned int reqcounts[NR_RPC_COUNTERS];
+
+ /* /proc/net/rpc/nfsd "proc3" */
+ unsigned int reqcounts3[NR_RPC3_COUNTERS];
+
+ /* /proc/net/rpc/nfsd "proc4" & "proc4ops" (index 0 is the proc4 NULL count) */
+ unsigned int reqcounts4[NR_RPC4_SVR_COUNTERS];
+ } server;
+
+} proc_net_rpc_t;
+
+extern int refresh_proc_net_rpc(proc_net_rpc_t *);
diff --git a/src/pmdas/linux/proc_net_snmp.c b/src/pmdas/linux/proc_net_snmp.c
new file mode 100644
index 0000000..431d984
--- /dev/null
+++ b/src/pmdas/linux/proc_net_snmp.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_net_snmp.h"
+
+extern proc_net_snmp_t _pm_proc_net_snmp;
+extern pmdaInstid _pm_proc_net_snmp_indom_id[];
+static char *proc_net_snmp_icmpmsg_names;
+
+/*
+ * One column of a /proc/net/snmp row: maps a column heading to the
+ * slot in _pm_proc_net_snmp where its value is stored.  Despite the
+ * name, "offset" holds an absolute address, not a byte offset.
+ */
+typedef struct {
+ const char *field; /* column heading (a scanf pattern for IcmpMsg rows) */
+ __uint64_t *offset; /* destination counter inside _pm_proc_net_snmp */
+} snmp_fields_t;
+
+snmp_fields_t ip_fields[] = {
+ { .field = "Forwarding",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FORWARDING] },
+ { .field = "DefaultTTL",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_DEFAULTTTL] },
+ { .field = "InReceives",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INRECEIVES] },
+ { .field = "InHdrErrors",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INHDRERRORS] },
+ { .field = "InAddrErrors",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INADDRERRORS] },
+ { .field = "ForwDatagrams",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FORWDATAGRAMS] },
+ { .field = "InUnknownProtos",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INUNKNOWNPROTOS] },
+ { .field = "InDiscards",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INDISCARDS] },
+ { .field = "InDelivers",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_INDELIVERS] },
+ { .field = "OutRequests",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_OUTREQUESTS] },
+ { .field = "OutDiscards",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_OUTDISCARDS] },
+ { .field = "OutNoRoutes",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_OUTNOROUTES] },
+ { .field = "ReasmTimeout",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMTIMEOUT] },
+ { .field = "ReasmReqds",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMREQDS] },
+ { .field = "ReasmOKs",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMOKS] },
+ { .field = "ReasmFails",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_REASMFAILS] },
+ { .field = "FragOKs",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FRAGOKS] },
+ { .field = "FragFails",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FRAGFAILS] },
+ { .field = "FragCreates",
+ .offset = &_pm_proc_net_snmp.ip[_PM_SNMP_IP_FRAGCREATES] },
+ { .field = NULL, .offset = NULL }
+};
+
+snmp_fields_t icmp_fields[] = {
+ { .field = "InMsgs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INMSGS] },
+ { .field = "InErrors",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INERRORS] },
+ { .field = "InCsumErrors",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INCSUMERRORS] },
+ { .field = "InDestUnreachs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INDESTUNREACHS] },
+ { .field = "InTimeExcds",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INTIMEEXCDS] },
+ { .field = "InParmProbs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INPARMPROBS] },
+ { .field = "InSrcQuenchs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INSRCQUENCHS] },
+ { .field = "InRedirects",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INREDIRECTS] },
+ { .field = "InEchos",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INECHOS] },
+ { .field = "InEchoReps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INECHOREPS] },
+ { .field = "InTimestamps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INTIMESTAMPS] },
+ { .field = "InTimestampReps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INTIMESTAMPREPS] },
+ { .field = "InAddrMasks",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INADDRMASKS] },
+ { .field = "InAddrMaskReps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_INADDRMASKREPS] },
+ { .field = "OutMsgs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTMSGS] },
+ { .field = "OutErrors",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTERRORS] },
+ { .field = "OutDestUnreachs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTDESTUNREACHS] },
+ { .field = "OutTimeExcds",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTTIMEEXCDS] },
+ { .field = "OutParmProbs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTPARMPROBS] },
+ { .field = "OutSrcQuenchs",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTSRCQUENCHS] },
+ { .field = "OutRedirects",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTREDIRECTS] },
+ { .field = "OutEchos",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTECHOS] },
+ { .field = "OutEchoReps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTECHOREPS] },
+ { .field = "OutTimestamps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTTIMESTAMPS] },
+ { .field = "OutTimestampReps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTTIMESTAMPREPS] },
+ { .field = "OutAddrMasks",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTADDRMASKS] },
+ { .field = "OutAddrMaskReps",
+ .offset = &_pm_proc_net_snmp.icmp[_PM_SNMP_ICMP_OUTADDRMASKREPS] },
+ { .field = NULL, .offset = NULL }
+};
+
+snmp_fields_t icmpmsg_fields[] = {
+ { .field = "InType%u",
+ .offset = &_pm_proc_net_snmp.icmpmsg[_PM_SNMP_ICMPMSG_INTYPE] },
+ { .field = "OutType%u",
+ .offset = &_pm_proc_net_snmp.icmpmsg[_PM_SNMP_ICMPMSG_OUTTYPE] },
+ { .field = NULL, .offset = NULL }
+};
+
+snmp_fields_t tcp_fields[] = {
+ { .field = "RtoAlgorithm",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RTOALGORITHM] },
+ { .field = "RtoMin",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RTOMIN] },
+ { .field = "RtoMax",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RTOMAX] },
+ { .field = "MaxConn",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_MAXCONN] },
+ { .field = "ActiveOpens",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_ACTIVEOPENS] },
+ { .field = "PassiveOpens",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_PASSIVEOPENS] },
+ { .field = "AttemptFails",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_ATTEMPTFAILS] },
+ { .field = "EstabResets",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_ESTABRESETS] },
+ { .field = "CurrEstab",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_CURRESTAB] },
+ { .field = "InSegs",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_INSEGS] },
+ { .field = "OutSegs",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_OUTSEGS] },
+ { .field = "RetransSegs",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_RETRANSSEGS] },
+ { .field = "InErrs",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_INERRS] },
+ { .field = "OutRsts",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_OUTRSTS] },
+ { .field = "InCsumErrors",
+ .offset = &_pm_proc_net_snmp.tcp[_PM_SNMP_TCP_INCSUMERRORS] },
+ { .field = NULL, .offset = NULL }
+};
+
+snmp_fields_t udp_fields[] = {
+ { .field = "InDatagrams",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_INDATAGRAMS] },
+ { .field = "NoPorts",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_NOPORTS] },
+ { .field = "InErrors",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_INERRORS] },
+ { .field = "OutDatagrams",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_OUTDATAGRAMS] },
+ { .field = "RcvbufErrors",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_RECVBUFERRORS] },
+ { .field = "SndbufErrors",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_SNDBUFERRORS] },
+ { .field = "InCsumErrors",
+ .offset = &_pm_proc_net_snmp.udp[_PM_SNMP_UDP_INCSUMERRORS] },
+ { .field = NULL, .offset = NULL }
+};
+
+snmp_fields_t udplite_fields[] = {
+ { .field = "InDatagrams",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_INDATAGRAMS] },
+ { .field = "NoPorts",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_NOPORTS] },
+ { .field = "InErrors",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_INERRORS] },
+ { .field = "OutDatagrams",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_OUTDATAGRAMS] },
+ { .field = "RcvbufErrors",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_RECVBUFERRORS] },
+ { .field = "SndbufErrors",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_SNDBUFERRORS] },
+ { .field = "InCsumErrors",
+ .offset = &_pm_proc_net_snmp.udplite[_PM_SNMP_UDPLITE_INCSUMERRORS] },
+ { .field = NULL, .offset = NULL }
+};
+
+/*
+ * Parse one header-line/value-line pair from /proc/net/snmp.
+ * Column headings tokenised from "header" are matched by name against
+ * the fields[] table and the corresponding value from "buffer" is
+ * stored through the table entry's pointer.  Both strings are
+ * modified in place (strtok), and unknown columns are ignored.
+ */
+static void
+get_fields(snmp_fields_t *fields, char *header, char *buffer)
+{
+ int i, j, count;
+ char *p, *indices[SNMP_MAX_COLUMNS];
+
+ /* first get pointers to each of the column headings */
+ strtok(header, " ");
+ for (i = 0; i < SNMP_MAX_COLUMNS; i++) {
+ if ((p = strtok(NULL, " \n")) == NULL)
+ break;
+ indices[i] = p;
+ }
+ count = i;
+
+ /*
+ * Extract values via back-referencing column headings.
+ * "i" is the last found index, which we use for a bit
+ * of optimisation for the (common) in-order maps case
+ * (where "in order" means in the order defined by the
+ * passed in "fields" table which typically matches the
+ * kernel - but may be out-of-order for older kernels).
+ */
+ strtok(buffer, " ");
+ for (i = j = 0; j < count && fields[i].field; j++, i++) {
+ if ((p = strtok(NULL, " \n")) == NULL)
+ break;
+ if (strcmp(fields[i].field, indices[j]) == 0) {
+ *fields[i].offset = strtoull(p, NULL, 10);
+ } else {
+ /* out-of-order column: fall back to a full table scan */
+ for (i = 0; fields[i].field; i++) {
+ if (strcmp(fields[i].field, indices[j]) != 0)
+ continue;
+ *fields[i].offset = strtoull(p, NULL, 10);
+ break;
+ }
+ if (fields[i].field == NULL) /* not found, ignore */
+ i = 0;
+ }
+ }
+}
+
+/*
+ * Like get_fields(), but for rows whose column headings embed an
+ * instance number (the IcmpMsg row, e.g. "InType3"/"OutType8").
+ * Each fields[].field entry is an sscanf pattern ("InType%u"); the
+ * extracted instance selects the slot at fields[i].offset + inst.
+ * Instances at or beyond "limit" are silently discarded.
+ */
+static void
+get_ordinal_fields(snmp_fields_t *fields, char *header, char *buffer,
+ unsigned limit)
+{
+ int i, j, count;
+ unsigned int inst;
+ char *p, *indices[SNMP_MAX_COLUMNS];
+
+ /* collect pointers to each of the column headings */
+ strtok(header, " ");
+ for (i = 0; i < SNMP_MAX_COLUMNS; i++) {
+ if ((p = strtok(NULL, " \n")) == NULL)
+ break;
+ indices[i] = p;
+ }
+ count = i;
+
+ strtok(buffer, " ");
+ for (j = 0; j < count; j++) {
+ if ((p = strtok(NULL, " \n")) == NULL)
+ break;
+ for (i = 0; fields[i].field; i++) {
+ if (sscanf(indices[j], fields[i].field, &inst) != 1)
+ continue;
+ if (inst >= limit) /* instance out of range, drop it */
+ continue;
+ *(fields[i].offset + inst) = strtoull(p, NULL, 10);
+ break;
+ }
+ }
+}
+
+#define SNMP_IP_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+ (__psint_t)ip_fields[ii].offset - (__psint_t)&_pm_proc_net_snmp.ip)
+#define SNMP_ICMP_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+ (__psint_t)icmp_fields[ii].offset - (__psint_t)&_pm_proc_net_snmp.icmp)
+#define SNMP_ICMPMSG_OFFSET(ii, nn, pp) (int64_t *)((char *)pp + \
+ (__psint_t)(icmpmsg_fields[ii].offset + nn) - (__psint_t)&_pm_proc_net_snmp.icmpmsg)
+#define SNMP_TCP_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+ (__psint_t)tcp_fields[ii].offset - (__psint_t)&_pm_proc_net_snmp.tcp)
+#define SNMP_UDP_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+ (__psint_t)udp_fields[ii].offset - (__psint_t)&_pm_proc_net_snmp.udp)
+#define SNMP_UDPLITE_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+ (__psint_t)udplite_fields[ii].offset - (__psint_t)&_pm_proc_net_snmp.udplite)
+
+/*
+ * Prepare for a refresh: mark every counter -1 ("no value available",
+ * so columns absent from this kernel's /proc/net/snmp are detectable),
+ * then - on the first call only - build the ICMP message-type instance
+ * names and register the ICMPMSG instance domain.
+ */
+static void
+init_refresh_proc_net_snmp(proc_net_snmp_t *snmp)
+{
+ pmdaIndom *idp;
+ char *s;
+ int i, n;
+
+ /* initially, all marked as "no value available" */
+ for (i = 0; ip_fields[i].field != NULL; i++)
+ *(SNMP_IP_OFFSET(i, snmp->ip)) = -1;
+ for (i = 0; icmp_fields[i].field != NULL; i++)
+ *(SNMP_ICMP_OFFSET(i, snmp->icmp)) = -1;
+ for (i = 0; tcp_fields[i].field != NULL; i++)
+ *(SNMP_TCP_OFFSET(i, snmp->tcp)) = -1;
+ for (i = 0; udp_fields[i].field != NULL; i++)
+ *(SNMP_UDP_OFFSET(i, snmp->udp)) = -1;
+ for (i = 0; udplite_fields[i].field != NULL; i++)
+ *(SNMP_UDPLITE_OFFSET(i, snmp->udplite)) = -1;
+ for (i = 0; icmpmsg_fields[i].field != NULL; i++)
+ for (n = 0; n < NR_ICMPMSG_COUNTERS; n++)
+ *(SNMP_ICMPMSG_OFFSET(i, n, snmp->icmpmsg)) = -1;
+
+ /* only need to allocate and setup the names once */
+ if (proc_net_snmp_icmpmsg_names)
+ return;
+ i = NR_ICMPMSG_COUNTERS * SNMP_MAX_ICMPMSG_TYPESTR;
+ proc_net_snmp_icmpmsg_names = malloc(i);
+ if (!proc_net_snmp_icmpmsg_names)
+ return; /* on alloc failure the indom is simply left unregistered */
+ s = proc_net_snmp_icmpmsg_names;
+ for (n = 0; n < NR_ICMPMSG_COUNTERS; n++) {
+ /* "Type255" + NUL is 8 bytes == SNMP_MAX_ICMPMSG_TYPESTR, n < 256 */
+ sprintf(s, "Type%u", n);
+ _pm_proc_net_snmp_indom_id[n].i_name = s;
+ _pm_proc_net_snmp_indom_id[n].i_inst = n;
+ s += SNMP_MAX_ICMPMSG_TYPESTR;
+ }
+ idp = PMDAINDOM(ICMPMSG_INDOM);
+ idp->it_numinst = NR_ICMPMSG_COUNTERS;
+ idp->it_set = _pm_proc_net_snmp_indom_id;
+}
+
+/*
+ * Refresh all SNMP counters from /proc/net/snmp.
+ * The file is a sequence of (header line, value line) pairs, both
+ * prefixed with the protocol tag (e.g. "Ip:"), which is why the tag
+ * dispatch below can test the value line ("buf") rather than "header".
+ * Returns 0 on success, -oserror() if the stats file cannot be opened.
+ */
+int
+refresh_proc_net_snmp(proc_net_snmp_t *snmp)
+{
+ char buf[MAXPATHLEN]; /* doubles as pathname buffer for linux_statsfile */
+ char header[1024];
+ FILE *fp;
+
+ init_refresh_proc_net_snmp(snmp);
+ if ((fp = linux_statsfile("/proc/net/snmp", buf, sizeof(buf))) == NULL)
+ return -oserror();
+ while (fgets(header, sizeof(header), fp) != NULL) {
+ if (fgets(buf, sizeof(buf), fp) != NULL) {
+ /* note: "Icmp:" test safely rejects "IcmpMsg:" (differs at ':') */
+ if (strncmp(buf, "Ip:", 3) == 0)
+ get_fields(ip_fields, header, buf);
+ else if (strncmp(buf, "Icmp:", 5) == 0)
+ get_fields(icmp_fields, header, buf);
+ else if (strncmp(buf, "IcmpMsg:", 8) == 0)
+ get_ordinal_fields(icmpmsg_fields, header, buf,
+ NR_ICMPMSG_COUNTERS);
+ else if (strncmp(buf, "Tcp:", 4) == 0)
+ get_fields(tcp_fields, header, buf);
+ else if (strncmp(buf, "Udp:", 4) == 0)
+ get_fields(udp_fields, header, buf);
+ else if (strncmp(buf, "UdpLite:", 8) == 0)
+ get_fields(udplite_fields, header, buf);
+ else
+ fprintf(stderr, "Error: unrecognised snmp row: %s", buf);
+ }
+ }
+ fclose(fp);
+ return 0;
+}
diff --git a/src/pmdas/linux/proc_net_snmp.h b/src/pmdas/linux/proc_net_snmp.h
new file mode 100644
index 0000000..eeeb2a6
--- /dev/null
+++ b/src/pmdas/linux/proc_net_snmp.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define SNMP_MAX_COLUMNS 64 /* arbitrary upper bound */
+#define SNMP_PERLINE 16 /* see net/ipv4/proc.c */
+#define SNMP_MAX_ICMPMSG_TYPESTR 8 /* longest name for type */
+#define NR_ICMPMSG_COUNTERS 256 /* half of __ICMPMSG_MIB_MAX from kernel */
+
+enum {
+ _PM_SNMP_IP_FORWARDING = 0,
+ _PM_SNMP_IP_DEFAULTTTL,
+ _PM_SNMP_IP_INRECEIVES,
+ _PM_SNMP_IP_INHDRERRORS,
+ _PM_SNMP_IP_INADDRERRORS,
+ _PM_SNMP_IP_FORWDATAGRAMS,
+ _PM_SNMP_IP_INUNKNOWNPROTOS,
+ _PM_SNMP_IP_INDISCARDS,
+ _PM_SNMP_IP_INDELIVERS,
+ _PM_SNMP_IP_OUTREQUESTS,
+ _PM_SNMP_IP_OUTDISCARDS,
+ _PM_SNMP_IP_OUTNOROUTES,
+ _PM_SNMP_IP_REASMTIMEOUT,
+ _PM_SNMP_IP_REASMREQDS,
+ _PM_SNMP_IP_REASMOKS,
+ _PM_SNMP_IP_REASMFAILS,
+ _PM_SNMP_IP_FRAGOKS,
+ _PM_SNMP_IP_FRAGFAILS,
+ _PM_SNMP_IP_FRAGCREATES,
+
+ _PM_SNMP_IP_NFIELDS /* must be last */
+};
+
+enum {
+ _PM_SNMP_ICMP_INMSGS = 0,
+ _PM_SNMP_ICMP_INERRORS,
+ _PM_SNMP_ICMP_INCSUMERRORS,
+ _PM_SNMP_ICMP_INDESTUNREACHS,
+ _PM_SNMP_ICMP_INTIMEEXCDS,
+ _PM_SNMP_ICMP_INPARMPROBS,
+ _PM_SNMP_ICMP_INSRCQUENCHS,
+ _PM_SNMP_ICMP_INREDIRECTS,
+ _PM_SNMP_ICMP_INECHOS,
+ _PM_SNMP_ICMP_INECHOREPS,
+ _PM_SNMP_ICMP_INTIMESTAMPS,
+ _PM_SNMP_ICMP_INTIMESTAMPREPS,
+ _PM_SNMP_ICMP_INADDRMASKS,
+ _PM_SNMP_ICMP_INADDRMASKREPS,
+ _PM_SNMP_ICMP_OUTMSGS,
+ _PM_SNMP_ICMP_OUTERRORS,
+ _PM_SNMP_ICMP_OUTDESTUNREACHS,
+ _PM_SNMP_ICMP_OUTTIMEEXCDS,
+ _PM_SNMP_ICMP_OUTPARMPROBS,
+ _PM_SNMP_ICMP_OUTSRCQUENCHS,
+ _PM_SNMP_ICMP_OUTREDIRECTS,
+ _PM_SNMP_ICMP_OUTECHOS,
+ _PM_SNMP_ICMP_OUTECHOREPS,
+ _PM_SNMP_ICMP_OUTTIMESTAMPS,
+ _PM_SNMP_ICMP_OUTTIMESTAMPREPS,
+ _PM_SNMP_ICMP_OUTADDRMASKS,
+ _PM_SNMP_ICMP_OUTADDRMASKREPS,
+
+ _PM_SNMP_ICMP_NFIELDS /* must be last */
+};
+
+enum {
+ _PM_SNMP_ICMPMSG_INTYPE = 0,
+ _PM_SNMP_ICMPMSG_OUTTYPE = NR_ICMPMSG_COUNTERS,
+ _PM_SNMP_ICMPMSG_NFIELDS = (NR_ICMPMSG_COUNTERS*2)
+};
+
+enum {
+ _PM_SNMP_TCP_RTOALGORITHM = 0,
+ _PM_SNMP_TCP_RTOMIN,
+ _PM_SNMP_TCP_RTOMAX,
+ _PM_SNMP_TCP_MAXCONN,
+ _PM_SNMP_TCP_ACTIVEOPENS,
+ _PM_SNMP_TCP_PASSIVEOPENS,
+ _PM_SNMP_TCP_ATTEMPTFAILS,
+ _PM_SNMP_TCP_ESTABRESETS,
+ _PM_SNMP_TCP_CURRESTAB,
+ _PM_SNMP_TCP_INSEGS,
+ _PM_SNMP_TCP_OUTSEGS,
+ _PM_SNMP_TCP_RETRANSSEGS,
+ _PM_SNMP_TCP_INERRS,
+ _PM_SNMP_TCP_OUTRSTS,
+ _PM_SNMP_TCP_INCSUMERRORS,
+
+ _PM_SNMP_TCP_NFIELDS /* must be last */
+};
+
+enum {
+ _PM_SNMP_UDP_INDATAGRAMS = 0,
+ _PM_SNMP_UDP_NOPORTS,
+ _PM_SNMP_UDP_INERRORS,
+ _PM_SNMP_UDP_OUTDATAGRAMS,
+ _PM_SNMP_UDP_RECVBUFERRORS,
+ _PM_SNMP_UDP_SNDBUFERRORS,
+ _PM_SNMP_UDP_INCSUMERRORS,
+
+ _PM_SNMP_UDP_NFIELDS /* must be last */
+};
+
+enum {
+ _PM_SNMP_UDPLITE_INDATAGRAMS = 0,
+ _PM_SNMP_UDPLITE_NOPORTS,
+ _PM_SNMP_UDPLITE_INERRORS,
+ _PM_SNMP_UDPLITE_OUTDATAGRAMS,
+ _PM_SNMP_UDPLITE_RECVBUFERRORS,
+ _PM_SNMP_UDPLITE_SNDBUFERRORS,
+ _PM_SNMP_UDPLITE_INCSUMERRORS,
+
+ _PM_SNMP_UDPLITE_NFIELDS /* must be last */
+};
+
+typedef struct {
+ __uint64_t ip[_PM_SNMP_IP_NFIELDS];
+ __uint64_t icmp[_PM_SNMP_ICMP_NFIELDS];
+ __uint64_t icmpmsg[_PM_SNMP_ICMPMSG_NFIELDS];
+ __uint64_t tcp[_PM_SNMP_TCP_NFIELDS];
+ __uint64_t udp[_PM_SNMP_UDP_NFIELDS];
+ __uint64_t udplite[_PM_SNMP_UDPLITE_NFIELDS];
+} proc_net_snmp_t;
+
+extern int refresh_proc_net_snmp(proc_net_snmp_t *);
diff --git a/src/pmdas/linux/proc_net_snmp_migrate.conf b/src/pmdas/linux/proc_net_snmp_migrate.conf
new file mode 100644
index 0000000..4e3c4f7
--- /dev/null
+++ b/src/pmdas/linux/proc_net_snmp_migrate.conf
@@ -0,0 +1,8 @@
+# Copyright 2013 Red Hat.
+#
+# pmlogrewrite configuration for migrating archives containing old
+# 32 bit /proc/net/snmp values to the 64 bit variants, matching up
+# with changes in the metadata supplied by the PMDA (and the kernel).
+#
+
+metric 60.14.* { type -> U64 }
diff --git a/src/pmdas/linux/proc_net_sockstat.c b/src/pmdas/linux/proc_net_sockstat.c
new file mode 100644
index 0000000..94f5a79
--- /dev/null
+++ b/src/pmdas/linux/proc_net_sockstat.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_net_sockstat.h"
+
+/*
+ * Refresh socket usage counters from /proc/net/sockstat.
+ * For each of the TCP/UDP/RAW rows the sscanf skips the tag and the
+ * "inuse" keyword into the scratch buffer "fmt", reads the in-use
+ * count, skips the next keyword, and reads the next value as the
+ * high-water mark; UTIL is then derived as an integer percentage.
+ *
+ * NOTE(review): the "%s" conversions into fmt[64] are unbounded -
+ * safe only while the tokens stay short keywords; "%63s" would be
+ * more defensive.  Also, the "highest" field exists on older
+ * kernels; on kernels whose sockstat rows read e.g.
+ * "TCP: inuse N orphan N tw N ..." the second value captured here
+ * would be the orphan count, not a high-water mark - confirm
+ * against the target kernels.
+ */
+int
+refresh_proc_net_sockstat(proc_net_sockstat_t *proc_net_sockstat)
+{
+ char buf[1024];
+ char fmt[64]; /* scratch sink for tokens we skip over */
+ FILE *fp;
+
+ if ((fp = linux_statsfile("/proc/net/sockstat", buf, sizeof(buf))) == NULL)
+ return -oserror();
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if (strncmp(buf, "TCP:", 4) == 0) {
+ sscanf(buf, "%s %s %d %s %d", fmt, fmt,
+ &proc_net_sockstat->tcp[_PM_SOCKSTAT_INUSE], fmt,
+ &proc_net_sockstat->tcp[_PM_SOCKSTAT_HIGHEST]);
+ /* utilisation: 100 * inuse / highest, guarding divide-by-zero */
+ proc_net_sockstat->tcp[_PM_SOCKSTAT_UTIL] =
+ proc_net_sockstat->tcp[_PM_SOCKSTAT_HIGHEST] != 0 ?
+ 100 * proc_net_sockstat->tcp[_PM_SOCKSTAT_INUSE] /
+ proc_net_sockstat->tcp[_PM_SOCKSTAT_HIGHEST] : 0;
+ }
+ else
+ if (strncmp(buf, "UDP:", 4) == 0) {
+ sscanf(buf, "%s %s %d %s %d", fmt, fmt,
+ &proc_net_sockstat->udp[_PM_SOCKSTAT_INUSE], fmt,
+ &proc_net_sockstat->udp[_PM_SOCKSTAT_HIGHEST]);
+ proc_net_sockstat->udp[_PM_SOCKSTAT_UTIL] =
+ proc_net_sockstat->udp[_PM_SOCKSTAT_HIGHEST] != 0 ?
+ 100 * proc_net_sockstat->udp[_PM_SOCKSTAT_INUSE] /
+ proc_net_sockstat->udp[_PM_SOCKSTAT_HIGHEST] : 0;
+ }
+ else
+ if (strncmp(buf, "RAW:", 4) == 0) {
+ sscanf(buf, "%s %s %d %s %d", fmt, fmt,
+ &proc_net_sockstat->raw[_PM_SOCKSTAT_INUSE], fmt,
+ &proc_net_sockstat->raw[_PM_SOCKSTAT_HIGHEST]);
+ proc_net_sockstat->raw[_PM_SOCKSTAT_UTIL] =
+ proc_net_sockstat->raw[_PM_SOCKSTAT_HIGHEST] != 0 ?
+ 100 * proc_net_sockstat->raw[_PM_SOCKSTAT_INUSE] /
+ proc_net_sockstat->raw[_PM_SOCKSTAT_HIGHEST] : 0;
+ }
+ }
+
+ fclose(fp);
+ return 0;
+}
diff --git a/src/pmdas/linux/proc_net_sockstat.h b/src/pmdas/linux/proc_net_sockstat.h
new file mode 100644
index 0000000..4ad402e
--- /dev/null
+++ b/src/pmdas/linux/proc_net_sockstat.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* indices into the per-protocol counter arrays below */
+#define _PM_SOCKSTAT_INUSE 0 /* sockets currently in use */
+#define _PM_SOCKSTAT_HIGHEST 1 /* peak ("highest") value from the kernel */
+#define _PM_SOCKSTAT_UTIL 2 /* derived: 100 * inuse / highest */
+typedef struct {
+ int tcp[3]; /* each indexed by the _PM_SOCKSTAT_* values */
+ int udp[3];
+ int raw[3];
+} proc_net_sockstat_t;
+
+extern int refresh_proc_net_sockstat(proc_net_sockstat_t *);
+
diff --git a/src/pmdas/linux/proc_net_tcp.c b/src/pmdas/linux/proc_net_tcp.c
new file mode 100644
index 0000000..52f59b0
--- /dev/null
+++ b/src/pmdas/linux/proc_net_tcp.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 1999,2004 Silicon Graphics, Inc. All Rights Reserved.
+ * This code contributed by Michal Kara (lemming@arthur.plbohnice.cz)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <ctype.h>
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_net_tcp.h"
+
+#define MYBUFSZ (1<<14) /*16k*/
+
+/*
+ * Count sockets per TCP state by scanning /proc/net/tcp: the 4th
+ * whitespace-separated field of each connection line is the state
+ * ("st") column in hex, used directly to index proc_net_tcp->stat[].
+ * Lines are assembled manually from raw read(2) calls so that very
+ * long files are processed a buffer at a time; a partial line at the
+ * end of a buffer is moved to the front before the next read.
+ *
+ * NOTE(review): the header is consumed with stdio fgets() but the
+ * remainder is read with read(2) on the underlying fd - any bytes
+ * stdio buffered beyond the header line would be skipped; confirm
+ * the stream is unbuffered or short-read here.  strchrnul() is a
+ * GNU extension.
+ */
+int
+refresh_proc_net_tcp(proc_net_tcp_t *proc_net_tcp)
+{
+ FILE *fp;
+ char buf[MYBUFSZ]; /* also used as pathname scratch for linux_statsfile */
+ char *p = buf;
+ char *q;
+ unsigned int n;
+ ssize_t got = 0;
+ ptrdiff_t remnant = 0;
+
+ memset(proc_net_tcp, 0, sizeof(*proc_net_tcp));
+
+ if ((fp = linux_statsfile("/proc/net/tcp", buf, sizeof(buf))) == NULL)
+ return -oserror();
+
+ /* skip header */
+ if (fgets(buf, sizeof(buf), fp) == NULL) {
+ /* oops, no header! */
+ fclose(fp);
+ return -oserror();
+ }
+ for (buf[0]='\0';;) { /* discard the header text; force first read() */
+ q = strchrnul(p, '\n');
+ if (*q == '\n') {
+ /* complete line available: 4th field is the hex state code */
+ if (1 == sscanf(p, " %*s %*s %*s %x", &n)
+ && n < _PM_TCP_LAST) {
+ proc_net_tcp->stat[n]++;
+ }
+ p = q + 1;
+ continue;
+ }
+ /* partial line: shift it to the buffer start and top up */
+ remnant = (q - p);
+ if (remnant > 0 && p != buf)
+ memmove(buf, p, remnant);
+
+ got = read(fileno(fp), buf + remnant, MYBUFSZ - remnant - 1);
+ if (got <= 0)
+ break; /* EOF or error: any trailing partial line is dropped */
+
+ buf[remnant + got] = '\0';
+ p = buf;
+ }
+
+ fclose(fp);
+ return 0;
+}
diff --git a/src/pmdas/linux/proc_net_tcp.h b/src/pmdas/linux/proc_net_tcp.h
new file mode 100644
index 0000000..87e662f
--- /dev/null
+++ b/src/pmdas/linux/proc_net_tcp.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 1999,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * This code contributed by Michal Kara (lemming@arthur.plbohnice.cz)
+ */
+
+/*
+ * TCP socket states as reported (in hex) by the "st" column of
+ * /proc/net/tcp, mirroring the kernel's TCP_* enumeration
+ * (include/net/tcp_states.h).  Numbering starts at 1
+ * (TCP_ESTABLISHED), so index 0 of the stat[] array is unused.
+ */
+enum {
+ _PM_TCP_ESTABLISHED = 1,
+ _PM_TCP_SYN_SENT,
+ _PM_TCP_SYN_RECV,
+ _PM_TCP_FIN_WAIT1,
+ _PM_TCP_FIN_WAIT2,
+ _PM_TCP_TIME_WAIT,
+ _PM_TCP_CLOSE,
+ _PM_TCP_CLOSE_WAIT,
+ _PM_TCP_LAST_ACK,
+ _PM_TCP_LISTEN,
+ _PM_TCP_CLOSING,
+ _PM_TCP_LAST /* array-size sentinel, must be last */
+};
+
+
+/* per-state socket counts, indexed by the state enum above */
+typedef struct {
+ int stat[_PM_TCP_LAST];
+} proc_net_tcp_t;
+
+extern int refresh_proc_net_tcp(proc_net_tcp_t *);
+
diff --git a/src/pmdas/linux/proc_partitions.c b/src/pmdas/linux/proc_partitions.c
new file mode 100644
index 0000000..4901892
--- /dev/null
+++ b/src/pmdas/linux/proc_partitions.c
@@ -0,0 +1,808 @@
+/*
+ * Linux Partitions (disk and disk partition IO stats) Cluster
+ *
+ * Copyright (c) 2012-2014 Red Hat.
+ * Copyright (c) 2008,2012 Aconex. All Rights Reserved.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <ctype.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <unistd.h>
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "convert.h"
+#include "clusters.h"
+#include "indom.h"
+#include "proc_partitions.h"
+
+extern int _pm_numdisks;
+
+/*
+ * _pm_ispartition : return true if arg is a partition name
+ * return false if arg is a disk name
+ * ide disks are named e.g. hda
+ * ide partitions are named e.g. hda1
+ *
+ * scsi disks are named e.g. sda
+ * scsi partitions are named e.g. sda1
+ *
+ * device-mapper devices are named dm-[0-9]* and are mapped to their persistent
+ * name using the symlinks in /dev/mapper.
+ *
+ * devfs scsi disks are named e.g. scsi/host0/bus0/target1/lun0/disc
+ * devfs scsi partitions are named e.g. scsi/host0/bus0/target1/lun0/part1
+ *
+ * Mylex raid disks are named e.g. rd/c0d0 or dac960/c0d0
+ * Mylex raid partitions are named e.g. rd/c0d0p1 or dac960/c0d0p1
+ *
+ * What this now tries to do is be a bit smarter, and guess that names
+ * with slashes that end in the form .../c0t0d0[p0], and ones without
+ * are good old 19th century device names like xx0 or xx0a.
+ */
+/* return true iff dname is a loopback block device, e.g. loop0 */
+static int
+_pm_isloop(char *dname)
+{
+ return strncmp(dname, "loop", 4) == 0;
+}
+
+/* return true iff dname is a RAM disk device, e.g. ram0 */
+static int
+_pm_isramdisk(char *dname)
+{
+ return strncmp(dname, "ram", 3) == 0;
+}
+
+/*
+ * return true iff dname is a whole MMC/SD disk (mmcblk<n>), as
+ * opposed to one of its partitions (mmcblk<n>p<m>)
+ */
+static int
+_pm_ismmcdisk(char *dname)
+{
+ if (strncmp(dname, "mmcblk", 6) != 0)
+ return 0;
+ /*
+ * Are we a disk or a partition of the disk? If there is a "p"
+ * assume it is a partition - e.g. mmcblk0p6.
+ */
+ return (strchr(dname + 6, 'p') == NULL);
+}
+
+/*
+ * return true if arg is a device-mapper device
+ */
+static int
+_pm_isdm(char *dname)
+{
+ /* device-mapper block devices are named dm-<minor> by the kernel */
+ return strncmp(dname, "dm-", 3) == 0;
+}
+
+/*
+ * slight improvement to heuristic suggested by
+ * Tim Bradshaw <tfb@cley.com> on 29 Dec 2003
+ */
+/*
+ * Heuristically classify a block device name: returns non-zero when
+ * dname looks like a partition, zero when it looks like a whole disk.
+ */
+int
+_pm_ispartition(char *dname)
+{
+ int p, m = strlen(dname) - 1;
+
+ /*
+ * looking at something like foo/x, and we hope x ends p<n>, for
+ * a partition, or not for a disk.
+ */
+ if (strchr(dname, '/')) {
+ for (p = m; p > 0 && isdigit((int)dname[p]); p--)
+ ;
+ if (p == m)
+ /*
+ * name had no trailing digits. Wildly guess a disk.
+ * NOTE(review): the comment says "disk" but returning 1
+ * means "partition" everywhere else in this function --
+ * confirm the intended sense against upstream before
+ * relying on this branch.
+ */
+ return 1;
+ else
+ /*
+ * ends with digits, if preceding character is a 'p' punt
+ * on a partition
+ */
+ return (dname[p] == 'p'? 1 : 0);
+ }
+ else {
+ /*
+ * default test : partition names end in a digit and do not
+ * look like loopback devices. Handle other special-cases
+ * here - mostly seems to be RAM-type disk drivers that're
+ * choosing to end device names with numbers.
+ */
+ return isdigit((int)dname[m]) &&
+ !_pm_isloop(dname) &&
+ !_pm_isramdisk(dname) &&
+ !_pm_ismmcdisk(dname) &&
+ !_pm_isdm(dname);
+ }
+}
+
+/*
+ * return true if arg is an xvm volume name
+ */
+static int
+_pm_isxvmvol(char *dname)
+{
+ /* any name containing "xvm" is treated as an xvm volume */
+ return strstr(dname, "xvm") != NULL;
+}
+
+/*
+ * return true if arg is a disk name
+ */
+static int
+_pm_isdisk(char *dname)
+{
+ /* a disk is anything that is none of the other known device classes */
+ return !_pm_isloop(dname) && !_pm_isramdisk(dname) && !_pm_ispartition(dname) && !_pm_isxvmvol(dname) && !_pm_isdm(dname);
+}
+
+/*
+ * Re-key cache entries under long persistent /dev/xscsi names.
+ * When /dev/xscsi exists, walk it with find(1); each entry is a
+ * symlink whose target resolves (via realpath) to the short kernel
+ * name.  The matching active cache entry is then hidden under its
+ * short name and re-stored under the long udev name.
+ */
+static void
+refresh_udev(pmInDom disk_indom, pmInDom partitions_indom)
+{
+ char buf[MAXNAMELEN];
+ char realname[MAXNAMELEN];
+ char *shortname;
+ char *p;
+ char *udevname;
+ FILE *pfp;
+ partitions_entry_t *entry;
+ int indom;
+ int inst;
+
+ /* nothing to do on systems without the xscsi naming scheme */
+ if (access("/dev/xscsi", R_OK) != 0)
+ return;
+ if (!(pfp = popen("find /dev/xscsi -name disc -o -name part[0-9]*", "r")))
+ return;
+ while (fgets(buf, sizeof(buf), pfp)) {
+ if ((p = strrchr(buf, '\n')) != NULL)
+ *p = '\0';
+ if (realpath(buf, realname)) {
+ udevname = buf + 5; /* /dev/xscsi/.... */
+ if ((shortname = strrchr(realname, '/')) != NULL) {
+ shortname++;
+ indom = _pm_ispartition(shortname) ?
+ partitions_indom : disk_indom;
+ /* only re-key entries that are currently active */
+ if (pmdaCacheLookupName(indom, shortname, &inst, (void **)&entry) != PMDA_CACHE_ACTIVE)
+ continue;
+ entry->udevnamebuf = strdup(udevname);
+ pmdaCacheStore(indom, PMDA_CACHE_HIDE, shortname, entry); /* inactive */
+ pmdaCacheStore(indom, PMDA_CACHE_ADD, udevname, entry); /* active */
+ }
+ }
+ }
+ pclose(pfp);
+}
+
+/*
+ * Replace dm-* in namebuf with it's persistent name. This is a symlink in
+ * /dev/mapper/something -> ../dm-X where dm-X is currently in namebuf. Some
+ * older platforms (e.g. RHEL5) don't have the symlinks, just block devices
+ * in /dev/mapper. On newer kernels, the persistent name mapping is also
+ * exported via sysfs, which we use in preference. If this fails we leave
+ * the argument namebuf unaltered and return 0.
+ */
+static int
+map_persistent_dm_name(char *namebuf, int namelen, int devmajor, int devminor)
+{
+ int fd;
+ char *p;
+ DIR *dp;
+ int found = 0;
+ struct dirent *dentry;
+ struct stat sb;
+ char path[MAXPATHLEN];
+
+ /* preferred: modern kernels export the name via sysfs directly */
+ snprintf(path, sizeof(path), "%s/sys/block/%s/dm/name", linux_statspath, namebuf);
+ if ((fd = open(path, O_RDONLY)) >= 0) {
+ memset(path, 0, sizeof(path));
+ /*
+ * NOTE(review): if read() fills the entire buffer, path is not
+ * NUL-terminated (memset only helps for shorter reads) -- in
+ * practice dm names are short, but worth confirming upstream.
+ */
+ if (read(fd, path, sizeof(path)) > 0) {
+ if ((p = strchr(path, '\n')) != NULL)
+ *p = '\0';
+ /* NOTE(review): strncpy may leave namebuf unterminated on truncation */
+ strncpy(namebuf, path, MIN(sizeof(path), namelen));
+ found = 1;
+ }
+ close(fd);
+ }
+
+ if (!found) {
+ /*
+ * The sysfs name isn't available, so we'll have to walk /dev/mapper
+ * and match up dev_t instead [happens on RHEL5 and maybe elsewhere].
+ */
+ snprintf(path, sizeof(path), "%s/dev/mapper", linux_statspath);
+ if ((dp = opendir(path)) != NULL) {
+ while ((dentry = readdir(dp)) != NULL) {
+ snprintf(path, sizeof(path),
+ "%s/dev/mapper/%s", linux_statspath, dentry->d_name);
+ if (stat(path, &sb) != 0 || !S_ISBLK(sb.st_mode))
+ continue; /* only interested in block devices */
+ if (devmajor == major(sb.st_rdev) && devminor == minor(sb.st_rdev)) {
+ strncpy(namebuf, dentry->d_name, namelen);
+ found = 1;
+ break;
+ }
+ }
+ closedir(dp);
+ }
+ }
+
+ return found;
+}
+
+/*
+ * Refresh the disk, partition and device-mapper instance domains and
+ * per-device I/O counters from /proc/diskstats (2.6 era and later) or,
+ * failing that, /proc/partitions (2.4 era).  Device stats are kept in
+ * partitions_entry_t records attached to the pmdaCache entries.
+ * Returns 0 on success, -oserror() if neither stats file can be read.
+ */
+int
+refresh_proc_partitions(pmInDom disk_indom, pmInDom partitions_indom, pmInDom dm_indom)
+{
+ FILE *fp;
+ int devmin;
+ int devmaj;
+ int n;
+ int indom;
+ int have_proc_diskstats;
+ int inst;
+ unsigned long long blocks;
+ partitions_entry_t *p;
+ int indom_changes = 0;
+ char *dmname;
+ char buf[MAXPATHLEN];
+ char namebuf[MAXPATHLEN];
+ static int first = 1;
+
+ if (first) {
+ /* initialize the instance domain caches */
+ pmdaCacheOp(disk_indom, PMDA_CACHE_LOAD);
+ pmdaCacheOp(partitions_indom, PMDA_CACHE_LOAD);
+ pmdaCacheOp(dm_indom, PMDA_CACHE_LOAD);
+
+ first = 0;
+ indom_changes = 1;
+ }
+
+ /* mark everything inactive; entries seen below are re-activated */
+ pmdaCacheOp(disk_indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(partitions_indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(dm_indom, PMDA_CACHE_INACTIVE);
+
+ if ((fp = linux_statsfile("/proc/diskstats", buf, sizeof(buf))) != NULL)
+ /* 2.6 style disk stats */
+ have_proc_diskstats = 1;
+ else {
+ if ((fp = linux_statsfile("/proc/partitions", buf, sizeof(buf))) != NULL)
+ have_proc_diskstats = 0;
+ else
+ return -oserror();
+ }
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ dmname = NULL;
+ /*
+ * NOTE(review): the "|| buf[0] == '\n'" clause is redundant --
+ * a '\n' already fails the ' ' test, so this reduces to
+ * buf[0] != ' '.  Harmless, but confirm intent upstream.
+ */
+ if (buf[0] != ' ' || buf[0] == '\n') {
+ /* skip heading */
+ continue;
+ }
+
+ if (have_proc_diskstats) {
+ if ((n = sscanf(buf, "%d %d %s", &devmaj, &devmin, namebuf)) != 3)
+ continue;
+ }
+ else {
+ /* /proc/partitions */
+ if ((n = sscanf(buf, "%d %d %llu %s", &devmaj, &devmin, &blocks, namebuf)) != 4)
+ continue;
+ }
+
+ /* classify the device and choose the matching instance domain */
+ if (_pm_isdm(namebuf)) {
+ indom = dm_indom;
+ dmname = strdup(namebuf);
+ }
+ else if (_pm_ispartition(namebuf))
+ indom = partitions_indom;
+ else if (_pm_isdisk(namebuf))
+ indom = disk_indom;
+ else
+ continue;
+
+ if (indom == dm_indom) {
+ /* replace dm-[0-9]* with the persistent name from /dev/mapper */
+ if (!map_persistent_dm_name(namebuf, sizeof(namebuf), devmaj, devmin)) {
+ /* skip dm devices that have no persistent name mapping */
+ free(dmname);
+ continue;
+ }
+ }
+
+ p = NULL;
+ if (pmdaCacheLookupName(indom, namebuf, &inst, (void **)&p) < 0 || !p) {
+ /* not found: allocate and add a new entry */
+ /* NOTE(review): malloc return is not checked for NULL */
+ p = (partitions_entry_t *)malloc(sizeof(partitions_entry_t));
+ memset(p, 0, sizeof(partitions_entry_t));
+ indom_changes++;
+ }
+
+ if (p->dmname)
+ free(p->dmname);
+ p->dmname = dmname; /* NULL if not a dm device */
+
+ if (!p->namebuf)
+ p->namebuf = strdup(namebuf);
+ else
+ if (strcmp(namebuf, p->namebuf) != 0) {
+ free(p->namebuf);
+ p->namebuf = strdup(namebuf);
+ }
+
+ /* activate this entry */
+ if (p->udevnamebuf)
+ /* long xscsi name */
+ inst = pmdaCacheStore(indom, PMDA_CACHE_ADD, p->udevnamebuf, p);
+ else
+ /* short /proc/diskstats or /proc/partitions name */
+ inst = pmdaCacheStore(indom, PMDA_CACHE_ADD, namebuf, p);
+
+ if (have_proc_diskstats) {
+ /* 2.6 style /proc/diskstats */
+ p->nr_blocks = 0;
+ namebuf[0] = '\0';
+ /* Linux source: block/genhd.c::diskstats_show(1) */
+ n = sscanf(buf, "%u %u %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u",
+ &p->major, &p->minor, namebuf,
+ &p->rd_ios, &p->rd_merges, &p->rd_sectors, &p->rd_ticks,
+ &p->wr_ios, &p->wr_merges, &p->wr_sectors, &p->wr_ticks,
+ &p->ios_in_flight, &p->io_ticks, &p->aveq);
+ if (n != 14) {
+ /* short (partition) form: only ios and sectors are reported */
+ p->rd_merges = p->wr_merges = p->wr_ticks =
+ p->ios_in_flight = p->io_ticks = p->aveq = 0;
+ /* Linux source: block/genhd.c::diskstats_show(2) */
+ n = sscanf(buf, "%u %u %s %u %u %u %u\n",
+ &p->major, &p->minor, namebuf,
+ (unsigned int *)&p->rd_ios, (unsigned int *)&p->rd_sectors,
+ (unsigned int *)&p->wr_ios, (unsigned int *)&p->wr_sectors);
+ }
+ }
+ else {
+ /* 2.4 style /proc/partitions */
+ namebuf[0] = '\0';
+ n = sscanf(buf, "%u %u %lu %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u",
+ &p->major, &p->minor, &p->nr_blocks, namebuf,
+ &p->rd_ios, &p->rd_merges, &p->rd_sectors,
+ &p->rd_ticks, &p->wr_ios, &p->wr_merges,
+ &p->wr_sectors, &p->wr_ticks, &p->ios_in_flight,
+ &p->io_ticks, &p->aveq);
+ }
+
+ }
+
+ /*
+ * If any new disks or partitions have appeared then we
+ * we need to remap the long device names (if /dev/xscsi
+ * exists) and then flush the pmda cache.
+ *
+ * We just let inactive instances rot in the inactive state
+ * (this doesn't happen very often, so is only a minor leak).
+ */
+ if (indom_changes) {
+ refresh_udev(disk_indom, partitions_indom);
+ pmdaCacheOp(disk_indom, PMDA_CACHE_SAVE);
+ pmdaCacheOp(partitions_indom, PMDA_CACHE_SAVE);
+ pmdaCacheOp(dm_indom, PMDA_CACHE_SAVE);
+ }
+
+ /*
+ * success
+ */
+ if (fp)
+ fclose(fp);
+ return 0;
+}
+
+/*
+ * This table must always match the definitions in root_linux
+ * and metrictab[] in pmda.c
+ */
+/*
+ * All PMIDs serviced by proc_partitions_fetch(); searched (and a
+ * pointer into it cached) by is_partitions_metric().
+ */
+static pmID disk_metric_table[] = {
+ /* disk.dev.read */ PMDA_PMID(CLUSTER_STAT,4),
+ /* disk.dev.write */ PMDA_PMID(CLUSTER_STAT,5),
+ /* disk.dev.total */ PMDA_PMID(CLUSTER_STAT,28),
+ /* disk.dev.blkread */ PMDA_PMID(CLUSTER_STAT,6),
+ /* disk.dev.blkwrite */ PMDA_PMID(CLUSTER_STAT,7),
+ /* disk.dev.blktotal */ PMDA_PMID(CLUSTER_STAT,36),
+ /* disk.dev.read_bytes */ PMDA_PMID(CLUSTER_STAT,38),
+ /* disk.dev.write_bytes */ PMDA_PMID(CLUSTER_STAT,39),
+ /* disk.dev.total_bytes */ PMDA_PMID(CLUSTER_STAT,40),
+ /* disk.dev.read_merge */ PMDA_PMID(CLUSTER_STAT,49),
+ /* disk.dev.write_merge */ PMDA_PMID(CLUSTER_STAT,50),
+ /* disk.dev.avactive */ PMDA_PMID(CLUSTER_STAT,46),
+ /* disk.dev.aveq */ PMDA_PMID(CLUSTER_STAT,47),
+ /* disk.dev.scheduler */ PMDA_PMID(CLUSTER_STAT,59),
+ /* disk.dev.read_rawactive */ PMDA_PMID(CLUSTER_STAT,72),
+ /* disk.dev.write_rawactive */ PMDA_PMID(CLUSTER_STAT,73),
+
+ /* disk.all.read */ PMDA_PMID(CLUSTER_STAT,24),
+ /* disk.all.write */ PMDA_PMID(CLUSTER_STAT,25),
+ /* disk.all.total */ PMDA_PMID(CLUSTER_STAT,29),
+ /* disk.all.blkread */ PMDA_PMID(CLUSTER_STAT,26),
+ /* disk.all.blkwrite */ PMDA_PMID(CLUSTER_STAT,27),
+ /* disk.all.blktotal */ PMDA_PMID(CLUSTER_STAT,37),
+ /* disk.all.read_bytes */ PMDA_PMID(CLUSTER_STAT,41),
+ /* disk.all.write_bytes */ PMDA_PMID(CLUSTER_STAT,42),
+ /* disk.all.total_bytes */ PMDA_PMID(CLUSTER_STAT,43),
+ /* disk.all.read_merge */ PMDA_PMID(CLUSTER_STAT,51),
+ /* disk.all.write_merge */ PMDA_PMID(CLUSTER_STAT,52),
+ /* disk.all.avactive */ PMDA_PMID(CLUSTER_STAT,44),
+ /* disk.all.aveq */ PMDA_PMID(CLUSTER_STAT,45),
+ /* disk.all.read_rawactive */ PMDA_PMID(CLUSTER_STAT,74),
+ /* disk.all.write_rawactive */ PMDA_PMID(CLUSTER_STAT,75),
+
+ /* disk.partitions.read */ PMDA_PMID(CLUSTER_PARTITIONS,0),
+ /* disk.partitions.write */ PMDA_PMID(CLUSTER_PARTITIONS,1),
+ /* disk.partitions.total */ PMDA_PMID(CLUSTER_PARTITIONS,2),
+ /* disk.partitions.blkread */ PMDA_PMID(CLUSTER_PARTITIONS,3),
+ /* disk.partitions.blkwrite */ PMDA_PMID(CLUSTER_PARTITIONS,4),
+ /* disk.partitions.blktotal */ PMDA_PMID(CLUSTER_PARTITIONS,5),
+ /* disk.partitions.read_bytes */ PMDA_PMID(CLUSTER_PARTITIONS,6),
+ /* disk.partitions.write_bytes */PMDA_PMID(CLUSTER_PARTITIONS,7),
+ /* disk.partitions.total_bytes */PMDA_PMID(CLUSTER_PARTITIONS,8),
+
+ /* hinv.ndisk */ PMDA_PMID(CLUSTER_STAT,33),
+
+ /* disk.dm.read */ PMDA_PMID(CLUSTER_DM,0),
+ /* disk.dm.write */ PMDA_PMID(CLUSTER_DM,1),
+ /* disk.dm.total */ PMDA_PMID(CLUSTER_DM,2),
+ /* disk.dm.blkread */ PMDA_PMID(CLUSTER_DM,3),
+ /* disk.dm.blkwrite */ PMDA_PMID(CLUSTER_DM,4),
+ /* disk.dm.blktotal */ PMDA_PMID(CLUSTER_DM,5),
+ /* disk.dm.read_bytes */ PMDA_PMID(CLUSTER_DM,6),
+ /* disk.dm.write_bytes */ PMDA_PMID(CLUSTER_DM,7),
+ /* disk.dm.total_bytes */ PMDA_PMID(CLUSTER_DM,8),
+ /* disk.dm.read_merge */ PMDA_PMID(CLUSTER_DM,9),
+ /* disk.dm.write_merge */ PMDA_PMID(CLUSTER_DM,10),
+ /* disk.dm.avactive */ PMDA_PMID(CLUSTER_DM,11),
+ /* disk.dm.aveq */ PMDA_PMID(CLUSTER_DM,12),
+ /* hinv.map.dmname */ PMDA_PMID(CLUSTER_DM,13),
+ /* disk.dm.read_rawactive */ PMDA_PMID(CLUSTER_DM,14),
+ /* disk.dm.write_rawactive */ PMDA_PMID(CLUSTER_DM,15),
+};
+
+/*
+ * Return 1 if full_pmid is one of the disk metrics serviced by
+ * proc_partitions_fetch(), else 0.  A one-entry static cache (p)
+ * remembers the last successful lookup, since fetch requests
+ * typically arrive in runs for the same metric.
+ */
+int
+is_partitions_metric(pmID full_pmid)
+{
+    int		i;
+    static pmID	*p = NULL;
+    __pmID_int	*idp = (__pmID_int *)&(full_pmid);
+    pmID	pmid = PMDA_PMID(idp->cluster, idp->item);
+    int		n = sizeof(disk_metric_table) / sizeof(disk_metric_table[0]);
+
+    /* fast path: same metric as the previous (successful) call */
+    if (p && *p == pmid)
+	return 1;
+    for (i = 0; i < n; i++) {
+	if (disk_metric_table[i] == pmid) {
+	    p = &disk_metric_table[i];	/* cache the hit for next time */
+	    return 1;
+	}
+    }
+    /*
+     * Bug fix: the original loop left p pointing one past the end of
+     * disk_metric_table after a failed search, so the next call's
+     * cache check dereferenced *p out of bounds.  Reset on miss.
+     */
+    p = NULL;
+    return 0;
+}
+
+/*
+ * Return the name of the I/O scheduler in use for the given block
+ * device, or "unknown" if it cannot be determined.  The value may
+ * point into a static buffer (buf), so it is overwritten by the next
+ * call and this routine is not thread-safe.
+ */
+char *
+_pm_ioscheduler(const char *device)
+{
+ FILE *fp;
+ char *p, *q;
+ static char buf[1024];
+ char path[MAXNAMELEN];
+
+ /*
+ * Extract scheduler from /sys/block/<device>/queue/scheduler.
+ * File format: "noop anticipatory [deadline] cfq"
+ * In older kernels (incl. RHEL5 and SLES10) this doesn't exist,
+ * but we can still look in /sys/block/<device>/queue/iosched to
+ * intuit the ones we know about (cfq, deadline, as, noop) based
+ * on the different status files they create.
+ */
+ sprintf(path, "%s/sys/block/%s/queue/scheduler", linux_statspath, device);
+ if ((fp = fopen(path, "r")) != NULL) {
+ p = fgets(buf, sizeof(buf), fp);
+ fclose(fp);
+ if (!p)
+ goto unknown;
+ /* scan for the active scheduler, bracketed as "[name]" */
+ for (p = q = buf; p && *p && *p != ']'; p++) {
+ if (*p == '[')
+ q = p+1;
+ }
+ if (q == buf)
+ goto unknown;
+ if (*p != ']')
+ goto unknown;
+ *p = '\0';
+ return q;
+ }
+ else {
+#define BLKQUEUE "%s/sys/block/%s/queue/"
+ /* sniff around, maybe we'll get lucky and find something */
+ sprintf(path, BLKQUEUE "iosched/quantum", linux_statspath, device);
+ if (access(path, F_OK) == 0)
+ return "cfq";
+ sprintf(path, BLKQUEUE "iosched/fifo_batch", linux_statspath, device);
+ if (access(path, F_OK) == 0)
+ return "deadline";
+ sprintf(path, BLKQUEUE "iosched/antic_expire", linux_statspath, device);
+ if (access(path, F_OK) == 0)
+ return "anticipatory";
+ /* punt. noop has no files to match on ... */
+ sprintf(path, BLKQUEUE "iosched", linux_statspath, device);
+ if (access(path, F_OK) == 0)
+ return "noop";
+ /* else fall though ... */
+#undef BLKQUEUE
+ }
+
+unknown:
+ return "unknown";
+}
+
+/*
+ * Fetch callback for the disk/partition/device-mapper metric clusters.
+ * Looks up the instance's partitions_entry_t in the relevant pmdaCache
+ * and fills in *atom.  Returns 1 when a value has been supplied, or a
+ * PM_ERR_* code (PM_ERR_INST, PM_ERR_PMID) on failure.
+ * Sector counts are halved for the *_bytes metrics (presumably
+ * 512-byte sectors -> Kbyte units; confirm against the metric help
+ * text and root_linux).
+ */
+int
+proc_partitions_fetch(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
+{
+ __pmID_int *idp = (__pmID_int *)&(mdesc->m_desc.pmid);
+ int i;
+ partitions_entry_t *p = NULL;
+
+ if (inst != PM_IN_NULL) {
+ if (pmdaCacheLookup(mdesc->m_desc.indom, inst, NULL, (void **)&p) < 0)
+ return PM_ERR_INST;
+ }
+
+ switch (idp->cluster) {
+ case CLUSTER_STAT:
+ /*
+ * disk.{dev,all} remain in CLUSTER_STAT for backward compatibility
+ */
+ switch(idp->item) {
+ case 4: /* disk.dev.read */
+ if (p == NULL)
+ return PM_ERR_INST;
+ _pm_assign_ulong(atom, p->rd_ios);
+ break;
+ case 5: /* disk.dev.write */
+ if (p == NULL)
+ return PM_ERR_INST;
+ _pm_assign_ulong(atom, p->wr_ios);
+ break;
+ case 6: /* disk.dev.blkread */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = p->rd_sectors;
+ break;
+ case 7: /* disk.dev.blkwrite */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = p->wr_sectors;
+ break;
+ case 28: /* disk.dev.total */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = p->rd_ios + p->wr_ios;
+ break;
+ case 36: /* disk.dev.blktotal */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = p->rd_sectors + p->wr_sectors;
+ break;
+ case 38: /* disk.dev.read_bytes */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = p->rd_sectors / 2;
+ break;
+ case 39: /* disk.dev.write_bytes */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = p->wr_sectors / 2;
+ break;
+ case 40: /* disk.dev.total_bytes */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ull = (p->rd_sectors + p->wr_sectors) / 2;
+ break;
+ case 46: /* disk.dev.avactive ... already msec from /proc/diskstats */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ul = p->io_ticks;
+ break;
+ case 47: /* disk.dev.aveq ... already msec from /proc/diskstats */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ul = p->aveq;
+ break;
+ case 49: /* disk.dev.read_merge */
+ if (p == NULL)
+ return PM_ERR_INST;
+ _pm_assign_ulong(atom, p->rd_merges);
+ break;
+ case 50: /* disk.dev.write_merge */
+ if (p == NULL)
+ return PM_ERR_INST;
+ _pm_assign_ulong(atom, p->wr_merges);
+ break;
+ case 59: /* disk.dev.scheduler */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->cp = _pm_ioscheduler(p->namebuf);
+ break;
+ case 72: /* disk.dev.read_rawactive already ms from /proc/diskstats */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ul = p->rd_ticks;
+ break;
+ case 73: /* disk.dev.write_rawactive already ms from /proc/diskstats */
+ if (p == NULL)
+ return PM_ERR_INST;
+ atom->ul = p->wr_ticks;
+ break;
+ default:
+ /* disk.all.* is a singular instance domain */
+ /* sum the requested counter over every disk in the cache */
+ atom->ull = 0;
+ for (pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_WALK_REWIND);;) {
+ if ((i = pmdaCacheOp(INDOM(DISK_INDOM), PMDA_CACHE_WALK_NEXT)) < 0)
+ break;
+ if (!pmdaCacheLookup(INDOM(DISK_INDOM), i, NULL, (void **)&p) || !p)
+ continue;
+ switch (idp->item) {
+ case 24: /* disk.all.read */
+ atom->ull += p->rd_ios;
+ break;
+ case 25: /* disk.all.write */
+ atom->ull += p->wr_ios;
+ break;
+ case 26: /* disk.all.blkread */
+ atom->ull += p->rd_sectors;
+ break;
+ case 27: /* disk.all.blkwrite */
+ atom->ull += p->wr_sectors;
+ break;
+ case 29: /* disk.all.total */
+ atom->ull += p->rd_ios + p->wr_ios;
+ break;
+ case 37: /* disk.all.blktotal */
+ atom->ull += p->rd_sectors + p->wr_sectors;
+ break;
+ case 41: /* disk.all.read_bytes */
+ atom->ull += p->rd_sectors / 2;
+ break;
+ case 42: /* disk.all.write_bytes */
+ atom->ull += p->wr_sectors / 2;
+ break;
+ case 43: /* disk.all.total_bytes */
+ atom->ull += (p->rd_sectors + p->wr_sectors) / 2;
+ break;
+ case 44: /* disk.all.avactive ... already msec from /proc/diskstats */
+ atom->ull += p->io_ticks;
+ break;
+ case 45: /* disk.all.aveq ... already msec from /proc/diskstats */
+ atom->ull += p->aveq;
+ break;
+ case 51: /* disk.all.read_merge */
+ atom->ull += p->rd_merges;
+ break;
+ case 52: /* disk.all.write_merge */
+ atom->ull += p->wr_merges;
+ break;
+ case 74: /* disk.all.read_rawactive ... already msec from /proc/diskstats */
+ atom->ull += p->rd_ticks;
+ break;
+ case 75: /* disk.all.write_rawactive ... already msec from /proc/diskstats */
+ atom->ull += p->wr_ticks;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ } /* loop */
+ }
+ break;
+
+ case CLUSTER_PARTITIONS:
+ if (p == NULL)
+ return PM_ERR_INST;
+ switch(idp->item) {
+ /* disk.partitions */
+ case 0: /* disk.partitions.read */
+ atom->ul = p->rd_ios;
+ break;
+ case 1: /* disk.partitions.write */
+ atom->ul = p->wr_ios;
+ break;
+ case 2: /* disk.partitions.total */
+ atom->ul = p->wr_ios +
+ p->rd_ios;
+ break;
+ case 3: /* disk.partitions.blkread */
+ atom->ul = p->rd_sectors;
+ break;
+ case 4: /* disk.partitions.blkwrite */
+ atom->ul = p->wr_sectors;
+ break;
+ case 5: /* disk.partitions.blktotal */
+ atom->ul = p->rd_sectors +
+ p->wr_sectors;
+ break;
+ case 6: /* disk.partitions.read_bytes */
+ atom->ul = p->rd_sectors / 2;
+ break;
+ case 7: /* disk.partitions.write_bytes */
+ atom->ul = p->wr_sectors / 2;
+ break;
+ case 8: /* disk.partitions.total_bytes */
+ atom->ul = (p->rd_sectors +
+ p->wr_sectors) / 2;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ case CLUSTER_DM:
+ if (p == NULL)
+ return PM_ERR_INST;
+ switch(idp->item) {
+ case 0: /* disk.dm.read */
+ atom->ull = p->rd_ios;
+ break;
+ case 1: /* disk.dm.write */
+ atom->ull = p->wr_ios;
+ break;
+ case 2: /* disk.dm.total */
+ atom->ull = p->rd_ios + p->wr_ios;
+ break;
+ case 3: /* disk.dm.blkread */
+ atom->ull = p->rd_sectors;
+ break;
+ case 4: /* disk.dm.blkwrite */
+ atom->ull = p->wr_sectors;
+ break;
+ case 5: /* disk.dm.blktotal */
+ atom->ull = p->rd_sectors + p->wr_sectors;
+ break;
+ case 6: /* disk.dm.read_bytes */
+ atom->ull = p->rd_sectors / 2;
+ break;
+ case 7: /* disk.dm.write_bytes */
+ atom->ull = p->wr_sectors / 2;
+ break;
+ case 8: /* disk.dm.total_bytes */
+ atom->ull = (p->rd_sectors + p->wr_sectors) / 2;
+ break;
+ case 9: /* disk.dm.read_merge */
+ atom->ull = p->rd_merges;
+ break;
+ case 10: /* disk.dm.write_merge */
+ atom->ull = p->wr_merges;
+ break;
+ case 11: /* disk.dm.avactive */
+ atom->ull = p->io_ticks;
+ break;
+ case 12: /* disk.dm.aveq */
+ atom->ull = p->aveq;
+ break;
+ case 13: /* hinv.map.dmname */
+ atom->cp = p->dmname;
+ break;
+ case 14: /* disk.dm.read_rawactive */
+ atom->ul = p->rd_ticks;
+ break;
+ case 15: /* disk.dm.write_rawactive */
+ atom->ul = p->wr_ticks;
+ break;
+ default:
+ return PM_ERR_PMID;
+ }
+ break;
+
+ default: /* switch cluster */
+ return PM_ERR_PMID;
+ }
+
+ return 1;
+}
diff --git a/src/pmdas/linux/proc_partitions.h b/src/pmdas/linux/proc_partitions.h
new file mode 100644
index 0000000..9348cde
--- /dev/null
+++ b/src/pmdas/linux/proc_partitions.h
@@ -0,0 +1,44 @@
+/*
+ * Linux /proc/partitions metrics cluster
+ *
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Per-device I/O statistics, parsed from one line of /proc/diskstats
+ * (or /proc/partitions on 2.4 kernels).  The *_ticks, io_ticks and
+ * aveq fields are already milliseconds as reported by the kernel;
+ * sector counts are halved by the fetch code to yield *_bytes metrics.
+ */
+typedef struct {
+ int id;
+ unsigned int major;
+ unsigned int minor;
+ unsigned long nr_blocks; /* only from /proc/partitions (2.4) */
+ char *namebuf; /* from /proc/{partitions,diskstats} */
+ char *udevnamebuf; /* from udev if we have it, else NULL */
+ char *dmname; /* symlink from /dev/mapper, else NULL */
+ unsigned long rd_ios;
+ unsigned long rd_merges;
+ unsigned long long rd_sectors;
+ unsigned int rd_ticks;
+ unsigned long wr_ios;
+ unsigned long wr_merges;
+ unsigned long long wr_sectors;
+ unsigned int wr_ticks;
+ unsigned int ios_in_flight;
+ unsigned int io_ticks;
+ unsigned int aveq;
+} partitions_entry_t;
+
+extern int refresh_proc_partitions(pmInDom disk_indom, pmInDom partitions_indom, pmInDom dm_indom);
+extern int is_partitions_metric(pmID);
+extern int proc_partitions_fetch(pmdaMetric *, unsigned int, pmAtomValue *);
diff --git a/src/pmdas/linux/proc_scsi.c b/src/pmdas/linux/proc_scsi.c
new file mode 100644
index 0000000..2b3c952
--- /dev/null
+++ b/src/pmdas/linux/proc_scsi.c
@@ -0,0 +1,159 @@
+/*
+ * Linux Scsi Devices Cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_scsi.h"
+
+/*
+ * Device-name templates/cursors, initialized on the first refresh:
+ * with devfs, diskname holds a printf format; otherwise the trailing
+ * character of each buffer is bumped as new devices are discovered.
+ */
+static char diskname[64];
+static char tapename[64];
+static char cdromname[64];
+
+/*
+ * Rebuild the SCSI device list and instance domain from
+ * /proc/scsi/scsi.  Devices are identified by (host, channel, id,
+ * lun); new ones are appended to scsi->scsi and given a device name
+ * guessed from their "Type:" string.  Existing entries are never
+ * removed.  Returns 0 on success, -oserror() on failure.
+ */
+int
+refresh_proc_scsi(proc_scsi_t *scsi)
+{
+ char buf[1024];
+ char name[1024];
+ int i;
+ int n;
+ FILE *fp;
+ char *sp;
+ static int have_devfs = -1;
+ static int next_id = -1;
+
+ if (next_id < 0) {
+ /* one trip initialization */
+ next_id = 0;
+
+ /* NOTE(review): malloc results here are not checked for NULL */
+ scsi->nscsi = 0;
+ scsi->scsi = (scsi_entry_t *)malloc(sizeof(scsi_entry_t));
+
+ /* scsi indom */
+ scsi->scsi_indom->it_numinst = 0;
+ scsi->scsi_indom->it_set = (pmdaInstid *)malloc(sizeof(pmdaInstid));
+
+ /* devfs naming convention */
+ have_devfs = access("/dev/.devfsd", F_OK) == 0;
+ if (have_devfs) {
+ strcpy(diskname, "scsi/host%d/bus%d/target%d/lun%d/disc");
+ strcpy(tapename, "st0");
+ strcpy(cdromname, "scd0");
+ }
+ else {
+ strcpy(diskname, "sda");
+ strcpy(tapename, "st0");
+ strcpy(cdromname, "scd0");
+ }
+ }
+
+ if ((fp = linux_statsfile("/proc/scsi/scsi", buf, sizeof(buf))) == NULL)
+ return -oserror();
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ scsi_entry_t x = { 0 };
+
+ if (strncmp(buf, "Host:", 5) != 0)
+ continue;
+
+ n = sscanf(buf, "Host: scsi%d Channel: %d Id: %d Lun: %d",
+ &x.dev_host, &x.dev_channel, &x.dev_id, &x.dev_lun);
+ if (n != 4)
+ continue;
+ /* have we seen this (host, channel, id, lun) tuple before? */
+ for (i=0; i < scsi->nscsi; i++) {
+ if (scsi->scsi[i].dev_host == x.dev_host &&
+ scsi->scsi[i].dev_channel == x.dev_channel &&
+ scsi->scsi[i].dev_id == x.dev_id &&
+ scsi->scsi[i].dev_lun == x.dev_lun)
+ break;
+ }
+
+ if (i == scsi->nscsi) {
+ /* new device: grow the table and fill in the new slot */
+ scsi->nscsi++;
+ scsi->scsi = (scsi_entry_t *)realloc(scsi->scsi,
+ scsi->nscsi * sizeof(scsi_entry_t));
+ memcpy(&scsi->scsi[i], &x, sizeof(scsi_entry_t));
+ scsi->scsi[i].id = next_id++;
+ /* scan for the Vendor: and Type: strings */
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if ((sp = strstr(buf, "Type:")) != (char *)NULL) {
+ if (sscanf(sp, "Type: %s", name) == 1)
+ scsi->scsi[i].dev_type = strdup(name);
+ else
+ scsi->scsi[i].dev_type = strdup("unknown");
+ break;
+ }
+ }
+
+ if (strcmp(scsi->scsi[i].dev_type, "Direct-Access") == 0) {
+ if (have_devfs) {
+ scsi->scsi[i].dev_name = (char *)malloc(64);
+ sprintf(scsi->scsi[i].dev_name, diskname,
+ scsi->scsi[i].dev_host, scsi->scsi[i].dev_channel,
+ scsi->scsi[i].dev_id, scsi->scsi[i].dev_lun);
+ }
+ else {
+ scsi->scsi[i].dev_name = strdup(diskname);
+ /* NOTE(review): no wrap handling past "sdz" (or st9/scd9 below) */
+ diskname[2]++; /* sd[a-z] bump to next disk device name */
+ }
+ }
+ else
+ if (strcmp(scsi->scsi[i].dev_type, "Sequential-Access") == 0) {
+ scsi->scsi[i].dev_name = strdup(tapename);
+ tapename[2]++; /* st[0-9] bump to next tape device name */
+ }
+ else
+ if (strcmp(scsi->scsi[i].dev_type, "CD-ROM") == 0) {
+ scsi->scsi[i].dev_name = strdup(cdromname);
+ cdromname[3]++; /* scd[0-9] bump to next CDROM device name */
+ }
+ else
+ if (strcmp(scsi->scsi[i].dev_type, "Processor") == 0)
+ scsi->scsi[i].dev_name = strdup("SCSI Controller");
+ else
+ scsi->scsi[i].dev_name = strdup("Unknown SCSI device");
+
+ /* external instance name: "scsiH:C:I:L type" */
+ sprintf(name, "scsi%d:%d:%d:%d %s", scsi->scsi[i].dev_host,
+ scsi->scsi[i].dev_channel, scsi->scsi[i].dev_id, scsi->scsi[i].dev_lun, scsi->scsi[i].dev_type);
+ scsi->scsi[i].namebuf = strdup(name);
+#if PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA) {
+ fprintf(stderr, "refresh_proc_scsi: add host=scsi%d channel=%d id=%d lun=%d type=%s\n",
+ scsi->scsi[i].dev_host, scsi->scsi[i].dev_channel,
+ scsi->scsi[i].dev_id, scsi->scsi[i].dev_lun,
+ scsi->scsi[i].dev_type);
+ }
+#endif
+ }
+ }
+
+ /* refresh scsi indom */
+ if (scsi->scsi_indom->it_numinst != scsi->nscsi) {
+ scsi->scsi_indom->it_numinst = scsi->nscsi;
+ scsi->scsi_indom->it_set = (pmdaInstid *)realloc(scsi->scsi_indom->it_set,
+ scsi->nscsi * sizeof(pmdaInstid));
+ memset(scsi->scsi_indom->it_set, 0, scsi->nscsi * sizeof(pmdaInstid));
+ }
+ for (i=0; i < scsi->nscsi; i++) {
+ scsi->scsi_indom->it_set[i].i_inst = scsi->scsi[i].id;
+ scsi->scsi_indom->it_set[i].i_name = scsi->scsi[i].namebuf;
+ }
+
+ fclose(fp);
+ return 0;
+}
diff --git a/src/pmdas/linux/proc_scsi.h b/src/pmdas/linux/proc_scsi.h
new file mode 100644
index 0000000..2ede392
--- /dev/null
+++ b/src/pmdas/linux/proc_scsi.h
@@ -0,0 +1,38 @@
+/*
+ * Linux /proc/scsi/scsi metrics cluster
+ *
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* one device parsed from a "Host: ..." stanza of /proc/scsi/scsi */
+typedef struct {
+ int id; /* internal instance id */
+ char *namebuf; /* external name, i.e. host:channel:id:lun */
+ int dev_host; /* scsi<N> host adapter number */
+ int dev_channel; /* Channel: field */
+ int dev_id; /* Id: field */
+ int dev_lun; /* Lun: field */
+ char *dev_type; /* Type: string, e.g. "Direct-Access" */
+ char *dev_name; /* guessed device name, e.g. "sda" */
+} scsi_entry_t;
+
+/* all known SCSI devices plus the instance domain built from them */
+typedef struct {
+ int nscsi; /* number of entries in scsi[] */
+ scsi_entry_t *scsi;
+ pmdaIndom *scsi_indom;
+} proc_scsi_t;
+
+/* re-scan /proc/scsi/scsi; returns 0 on success, -oserror() on failure */
+extern int refresh_proc_scsi(proc_scsi_t *);
diff --git a/src/pmdas/linux/proc_slabinfo.c b/src/pmdas/linux/proc_slabinfo.c
new file mode 100644
index 0000000..28dde3f
--- /dev/null
+++ b/src/pmdas/linux/proc_slabinfo.c
@@ -0,0 +1,237 @@
+/*
+ * Linux Memory Slab Cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <ctype.h>
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_slabinfo.h"
+
+/*
+ * Parse /proc/slabinfo and refresh the slab cache table and indom.
+ *
+ * Supports slabinfo format versions 1.0 (2.2 kernels), 1.1 (2.4)
+ * and 2.0/2.1 (2.6); the version is parsed once from the header
+ * line and cached in statics.  Caches are matched by name across
+ * refreshes so each keeps a stable instance id; caches no longer
+ * present (e.g. after rmmod) are invalidated and removed from the
+ * instance domain.
+ *
+ * Returns 0 on success, else a negative error code.
+ */
+int
+refresh_proc_slabinfo(proc_slabinfo_t *slabinfo)
+{
+ char buf[1024];
+ slab_cache_t sbuf;
+ slab_cache_t *s;
+ FILE *fp;
+ int i, n;
+ int instcount;
+ char *w, *p;
+ int old_cache;
+ int err = 0;
+ static int next_id = -1;
+ static int major_version = -1;
+ static int minor_version = 0;
+
+ if (next_id < 0) {
+ /* one trip initialization */
+ next_id = 0;
+
+ slabinfo->ncaches = 0;
+ slabinfo->caches = (slab_cache_t *)malloc(sizeof(slab_cache_t));
+ slabinfo->indom->it_numinst = 0;
+ slabinfo->indom->it_set = (pmdaInstid *)malloc(sizeof(pmdaInstid));
+ }
+
+ if ((fp = linux_statsfile("/proc/slabinfo", buf, sizeof(buf))) == NULL)
+ return -oserror();
+
+ /* clear the per-refresh "seen" marker on every known cache */
+ for (i=0; i < slabinfo->ncaches; i++)
+ slabinfo->caches[i].seen = 0;
+
+ /* skip header */
+ if (fgets(buf, sizeof(buf), fp) == NULL) {
+ /* oops, no header! */
+ err = -oserror();
+ goto out;
+ }
+
+ /* determine the slabinfo format version from the header, once only */
+ if (major_version < 0) {
+ major_version = minor_version = 0;
+ if (strstr(buf, "slabinfo - version:")) {
+ char *p;
+ for (p=buf; *p; p++) {
+ if (isdigit((int)*p)) {
+ sscanf(p, "%d.%d", &major_version, &minor_version);
+ break;
+ }
+ }
+ }
+ }
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ /* try to convert whitespace in cache names to underscores, */
+ /* by looking for alphabetic chars which follow whitespace. */
+ if (buf[0] == '#')
+ continue;
+ for (w = NULL, p = buf; *p != '\0'; p++) {
+ if (isspace((int)*p))
+ w = p;
+ else if (isdigit((int)*p))
+ break;
+ else if (isalpha((int)*p) && w) {
+ for (; w && w != p; w++)
+ *w = '_';
+ w = NULL;
+ }
+ }
+
+ memset(&sbuf, 0, sizeof(slab_cache_t));
+
+ if (major_version == 1 && minor_version == 0) {
+ /*
+ * <name> <active_objs> <num_objs>
+ * (generally 2.2 kernels)
+ */
+ n = sscanf(buf, "%s %lu %lu", sbuf.name,
+ (unsigned long *)&sbuf.num_active_objs,
+ (unsigned long *)&sbuf.total_objs);
+ if (n != 3) {
+ err = PM_ERR_APPVERSION;
+ goto out;
+ }
+ }
+ else if (major_version == 1 && minor_version == 1) {
+ /*
+ * <name> <active_objs> <num_objs> <objsize> <active_slabs> <num_slabs> <pagesperslab>
+ * (generally 2.4 kernels)
+ */
+ n = sscanf(buf, "%s %lu %lu %u %u %u %u", sbuf.name,
+ (unsigned long *)&sbuf.num_active_objs,
+ (unsigned long *)&sbuf.total_objs,
+ &sbuf.object_size,
+ &sbuf.num_active_slabs,
+ &sbuf.total_slabs,
+ &sbuf.pages_per_slab);
+ if (n != 7) {
+ err = PM_ERR_APPVERSION;
+ goto out;
+ }
+
+ sbuf.total_size = sbuf.pages_per_slab * sbuf.num_active_slabs * _pm_system_pagesize;
+ }
+ else if (major_version == 2 && minor_version >= 0 && minor_version <= 1) {
+ /*
+ * <name> <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> .. and more
+ * (generally for kernels up to at least 2.6.11)
+ */
+ n = sscanf(buf, "%s %lu %lu %u %u %u", sbuf.name,
+ (unsigned long *)&sbuf.num_active_objs,
+ (unsigned long *)&sbuf.total_objs,
+ &sbuf.object_size,
+ &sbuf.objects_per_slab,
+ &sbuf.pages_per_slab);
+ if (n != 6) {
+ err = PM_ERR_APPVERSION;
+ goto out;
+ }
+
+ sbuf.total_size = sbuf.pages_per_slab * sbuf.num_active_objs * _pm_system_pagesize / sbuf.objects_per_slab;
+ }
+ else {
+ /* no support */
+ err = PM_ERR_APPVERSION;
+ goto out;
+ }
+
+ /* look this cache up by name; remember an invalidated slot so its
+ * instance id can be reused if the cache has reappeared */
+ old_cache = -1;
+ for (i=0; i < slabinfo->ncaches; i++) {
+ if (strcmp(slabinfo->caches[i].name, sbuf.name) == 0) {
+ if (slabinfo->caches[i].valid)
+ break;
+ else
+ old_cache = i;
+ }
+ }
+
+ if (i == slabinfo->ncaches) {
+ /* new cache has appeared */
+ if (old_cache >= 0) {
+ /* same cache as last time : reuse the id */
+ i = old_cache;
+ }
+ else {
+ slabinfo->ncaches++;
+ slabinfo->caches = (slab_cache_t *)realloc(slabinfo->caches,
+ slabinfo->ncaches * sizeof(slab_cache_t));
+ slabinfo->caches[i].id = next_id++;
+ }
+ slabinfo->caches[i].valid = 1;
+#if PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA) {
+ fprintf(stderr, "refresh_slabinfo: add \"%s\"\n", sbuf.name);
+ }
+#endif
+ }
+
+ s = &slabinfo->caches[i];
+ strcpy(s->name, sbuf.name);
+ s->num_active_objs = sbuf.num_active_objs;
+ s->total_objs = sbuf.total_objs;
+ s->object_size = sbuf.object_size;
+ s->num_active_slabs = sbuf.num_active_slabs;
+ s->total_slabs = sbuf.total_slabs;
+ s->pages_per_slab = sbuf.pages_per_slab;
+ s->objects_per_slab = sbuf.objects_per_slab;
+ s->total_size = sbuf.total_size;
+
+ s->seen = major_version * 10 + minor_version; /* non-zero: seen this pass (encodes format version) */
+ }
+
+ /* check for caches that have been deleted (eg. by rmmod) */
+ for (i=0, instcount=0; i < slabinfo->ncaches; i++) {
+ if (slabinfo->caches[i].valid) {
+ if (slabinfo->caches[i].seen == 0) {
+ slabinfo->caches[i].valid = 0;
+#if PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA) {
+ fprintf(stderr, "refresh_slabinfo: drop \"%s\"\n", slabinfo->caches[i].name);
+ }
+#endif
+ }
+ else {
+ instcount++;
+ }
+ }
+ }
+
+ /* refresh slabinfo indom */
+ if (slabinfo->indom->it_numinst != instcount) {
+ slabinfo->indom->it_numinst = instcount;
+ slabinfo->indom->it_set = (pmdaInstid *)realloc(slabinfo->indom->it_set,
+ instcount * sizeof(pmdaInstid));
+ memset(slabinfo->indom->it_set, 0, instcount * sizeof(pmdaInstid));
+ }
+ for (n=0, i=0; i < slabinfo->ncaches; i++) {
+ if (slabinfo->caches[i].valid) {
+ slabinfo->indom->it_set[n].i_inst = slabinfo->caches[i].id;
+ slabinfo->indom->it_set[n].i_name = slabinfo->caches[i].name;
+#if PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA) {
+ fprintf(stderr, "refresh_slabinfo: cache[%d] = \"%s\"\n",
+ n, slabinfo->indom->it_set[n].i_name);
+ }
+#endif
+ n++;
+ }
+ }
+
+out:
+ fclose(fp);
+ return err;
+}
diff --git a/src/pmdas/linux/proc_slabinfo.h b/src/pmdas/linux/proc_slabinfo.h
new file mode 100644
index 0000000..92d4e9c
--- /dev/null
+++ b/src/pmdas/linux/proc_slabinfo.h
@@ -0,0 +1,53 @@
+/*
+ * Linux /proc/slabinfo metrics cluster
+ *
+ * Copyright (c) 1995 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*** version 1.1
+ "cache-name" num-active-objs total-objs object-size num-active-slabs \
+ total-slabs num-pages-per-slab
+ + further values (not exported) on SMP and with statistics enabled
+ *** version 1.0
+ "cache-name" num-active-objs total-objs
+ ***/
+
+/* State for one slab cache (one data line of /proc/slabinfo). */
+typedef struct {
+ int id; /* stable internal instance id, kept across refreshes */
+ int seen; /* have seen this time, and num values seen */
+ int valid; /* still present in /proc/slabinfo? */
+ char name[64]; /* cache name, embedded whitespace mapped to '_' */
+ __uint64_t num_active_objs;
+ __uint64_t total_objs;
+ __uint32_t object_size; /* bytes; not reported by the v1.0 format */
+ __uint64_t total_size; /* bytes; derived, v1.1 and v2.x formats only */
+ __uint32_t num_active_slabs; /* v1.1 format only */
+ __uint32_t objects_per_slab; /* v2.x format only */
+ __uint32_t total_slabs; /* v1.1 format only */
+ __uint32_t pages_per_slab;
+} slab_cache_t;
+
+/* All slab caches plus the PMDA instance domain built from them. */
+typedef struct {
+ int ncaches; /* entries in caches[], including invalidated ones */
+ slab_cache_t *caches;
+ pmdaIndom *indom; /* indom over the currently-valid caches */
+} proc_slabinfo_t;
+
+extern size_t _pm_system_pagesize; /* system page size, set elsewhere in the PMDA */
+
+extern int refresh_proc_slabinfo(proc_slabinfo_t *);
+
+
diff --git a/src/pmdas/linux/proc_stat.c b/src/pmdas/linux/proc_stat.c
new file mode 100644
index 0000000..6a8a798
--- /dev/null
+++ b/src/pmdas/linux/proc_stat.c
@@ -0,0 +1,304 @@
+/*
+ * Linux /proc/stat metrics cluster
+ *
+ * Copyright (c) 2012-2014 Red Hat.
+ * Copyright (c) 2008-2009 Aconex. All Rights Reserved.
+ * Copyright (c) 2000,2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include <dirent.h>
+#include <ctype.h>
+#include <sys/stat.h>
+#include "proc_cpuinfo.h"
+#include "proc_stat.h"
+
+/*
+ * Refresh CPU and related kernel counters from /proc/stat.
+ *
+ * The file descriptor is opened once and kept open (rewound with
+ * lseek on subsequent calls).  The whole file is slurped into a
+ * growable static buffer, carved into lines via bufindex[], then
+ * the "cpu", "cpuN", "page", "swap", "intr", "ctxt", "btime" and
+ * "processes" lines are scanned.  The first call also sizes the
+ * CPU instance domain and allocates the per-cpu and per-node
+ * counter arrays; per-node totals are re-aggregated from per-cpu
+ * values on every call using the cpu->node map in proc_cpuinfo.
+ *
+ * Returns 0 on success, else a negative error code.
+ */
+int
+refresh_proc_stat(proc_cpuinfo_t *proc_cpuinfo, proc_stat_t *proc_stat)
+{
+ pmdaIndom *idp = PMDAINDOM(CPU_INDOM);
+ char buf[MAXPATHLEN];
+ char fmt[64];
+ static int fd = -1; /* kept open until exit() */
+ static int started;
+ static char *statbuf;
+ static int maxstatbuf;
+ static char **bufindex;
+ static int nbufindex;
+ static int maxbufindex;
+ int size;
+ int n;
+ int i;
+ int j;
+
+ if (fd >= 0) {
+ if (lseek(fd, 0, SEEK_SET) < 0)
+ return -oserror();
+ } else {
+ snprintf(buf, sizeof(buf), "%s/proc/stat", linux_statspath);
+ if ((fd = open(buf, O_RDONLY)) < 0)
+ return -oserror();
+ }
+
+ /* slurp the whole file into statbuf, growing it 512 bytes at a time */
+ for (n=0;;) {
+ while (n >= maxstatbuf) {
+ size = maxstatbuf + 512;
+ if ((statbuf = (char *)realloc(statbuf, size)) == NULL)
+ return -ENOMEM;
+ maxstatbuf = size;
+ }
+ size = (statbuf + maxstatbuf) - (statbuf + n);
+ if ((i = read(fd, statbuf + n, size)) > 0)
+ n += i;
+ else
+ break;
+ }
+ statbuf[n] = '\0';
+
+ if (bufindex == NULL) {
+ size = 4 * sizeof(char *);
+ if ((bufindex = (char **)malloc(size)) == NULL)
+ return -ENOMEM;
+ maxbufindex = 4;
+ }
+
+ /* carve statbuf into NUL-terminated lines: bufindex[i] -> line i */
+ nbufindex = 0;
+ bufindex[nbufindex] = statbuf;
+ for (i=0; i < n; i++) {
+ if (statbuf[i] == '\n' || statbuf[i] == '\0') {
+ statbuf[i] = '\0';
+ if (nbufindex + 1 >= maxbufindex) {
+ size = (maxbufindex + 4) * sizeof(char *);
+ if ((bufindex = (char **)realloc(bufindex, size)) == NULL)
+ return -ENOMEM;
+ maxbufindex += 4;
+ }
+ bufindex[++nbufindex] = statbuf + i + 1;
+ }
+ }
+
+ if (!started) {
+ /* one-trip initialization: indom sizing and counter allocation */
+ started = 1;
+ memset(proc_stat, 0, sizeof(*proc_stat));
+
+ /* hz of running kernel */
+ proc_stat->hz = sysconf(_SC_CLK_TCK);
+
+ /* scan ncpus */
+ for (i=0; i < nbufindex; i++) {
+ if (strncmp("cpu", bufindex[i], 3) == 0 && isdigit((int)bufindex[i][3]))
+ proc_stat->ncpu++;
+ }
+ if (proc_stat->ncpu == 0)
+ proc_stat->ncpu = 1; /* non-SMP kernel? */
+ proc_stat->cpu_indom = idp;
+ proc_stat->cpu_indom->it_numinst = proc_stat->ncpu;
+ proc_stat->cpu_indom->it_set = (pmdaInstid *)malloc(
+ proc_stat->ncpu * sizeof(pmdaInstid));
+ /*
+ * Map out the CPU instance domain.
+ *
+ * The first call to cpu_name() does initialization on the
+ * proc_cpuinfo structure.
+ */
+ for (i=0; i < proc_stat->ncpu; i++) {
+ proc_stat->cpu_indom->it_set[i].i_inst = i;
+ proc_stat->cpu_indom->it_set[i].i_name = cpu_name(proc_cpuinfo, i);
+ }
+
+ n = proc_stat->ncpu * sizeof(unsigned long long);
+ proc_stat->p_user = (unsigned long long *)calloc(1, n);
+ proc_stat->p_nice = (unsigned long long *)calloc(1, n);
+ proc_stat->p_sys = (unsigned long long *)calloc(1, n);
+ proc_stat->p_idle = (unsigned long long *)calloc(1, n);
+ proc_stat->p_wait = (unsigned long long *)calloc(1, n);
+ proc_stat->p_irq = (unsigned long long *)calloc(1, n);
+ proc_stat->p_sirq = (unsigned long long *)calloc(1, n);
+ proc_stat->p_steal = (unsigned long long *)calloc(1, n);
+ proc_stat->p_guest = (unsigned long long *)calloc(1, n);
+
+ n = proc_cpuinfo->node_indom->it_numinst * sizeof(unsigned long long);
+ proc_stat->n_user = calloc(1, n);
+ proc_stat->n_nice = calloc(1, n);
+ proc_stat->n_sys = calloc(1, n);
+ proc_stat->n_idle = calloc(1, n);
+ proc_stat->n_wait = calloc(1, n);
+ proc_stat->n_irq = calloc(1, n);
+ proc_stat->n_sirq = calloc(1, n);
+ proc_stat->n_steal = calloc(1, n);
+ proc_stat->n_guest = calloc(1, n);
+ }
+ else {
+ /* reset per-node stats */
+ n = proc_cpuinfo->node_indom->it_numinst * sizeof(unsigned long long);
+ memset(proc_stat->n_user, 0, n);
+ memset(proc_stat->n_nice, 0, n);
+ memset(proc_stat->n_sys, 0, n);
+ memset(proc_stat->n_idle, 0, n);
+ memset(proc_stat->n_wait, 0, n);
+ memset(proc_stat->n_irq, 0, n);
+ memset(proc_stat->n_sirq, 0, n);
+ memset(proc_stat->n_steal, 0, n);
+ memset(proc_stat->n_guest, 0, n);
+ }
+ /*
+ * cpu 95379 4 20053 6502503
+ * 2.6 kernels have 3 additional fields
+ * for wait, irq and soft_irq.
+ */
+ strcpy(fmt, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu");
+ n = sscanf((const char *)bufindex[0], fmt,
+ &proc_stat->user, &proc_stat->nice,
+ &proc_stat->sys, &proc_stat->idle,
+ &proc_stat->wait, &proc_stat->irq,
+ &proc_stat->sirq, &proc_stat->steal,
+ &proc_stat->guest);
+
+ /*
+ * per-cpu stats
+ * e.g. cpu0 95379 4 20053 6502503
+ * 2.6 kernels have 3 additional fields for wait, irq and soft_irq.
+ * More recent (2008) 2.6 kernels have an extra field for guest.
+ */
+ if (proc_stat->ncpu == 1) {
+ /*
+ * Don't bother scanning - the counters are the same
+ * as for "all" cpus, as already scanned above.
+ * This also handles the non-SMP code where
+ * there is no line starting with "cpu0".
+ */
+ proc_stat->p_user[0] = proc_stat->user;
+ proc_stat->p_nice[0] = proc_stat->nice;
+ proc_stat->p_sys[0] = proc_stat->sys;
+ proc_stat->p_idle[0] = proc_stat->idle;
+ proc_stat->p_wait[0] = proc_stat->wait;
+ proc_stat->p_irq[0] = proc_stat->irq;
+ proc_stat->p_sirq[0] = proc_stat->sirq;
+ proc_stat->p_steal[0] = proc_stat->steal;
+ proc_stat->p_guest[0] = proc_stat->guest;
+ }
+ else {
+ strcpy(fmt, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu");
+ for (i=0; i < proc_stat->ncpu; i++) {
+ /* the j loop visits every "cpuN" line (the cpu number comes
+ * from the line itself, not from i), so one pass suffices */
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp("cpu", bufindex[j], 3) == 0 && isdigit((int)bufindex[j][3])) {
+ int c;
+ int cpunum = atoi(&bufindex[j][3]);
+ int node;
+ if (cpunum >= 0 && cpunum < proc_stat->ncpu) {
+ n = sscanf(bufindex[j], fmt, &c,
+ &proc_stat->p_user[cpunum],
+ &proc_stat->p_nice[cpunum],
+ &proc_stat->p_sys[cpunum],
+ &proc_stat->p_idle[cpunum],
+ &proc_stat->p_wait[cpunum],
+ &proc_stat->p_irq[cpunum],
+ &proc_stat->p_sirq[cpunum],
+ &proc_stat->p_steal[cpunum],
+ &proc_stat->p_guest[cpunum]);
+ if ((node = proc_cpuinfo->cpuinfo[cpunum].node) != -1) {
+ proc_stat->n_user[node] += proc_stat->p_user[cpunum];
+ proc_stat->n_nice[node] += proc_stat->p_nice[cpunum];
+ proc_stat->n_sys[node] += proc_stat->p_sys[cpunum];
+ proc_stat->n_idle[node] += proc_stat->p_idle[cpunum];
+ proc_stat->n_wait[node] += proc_stat->p_wait[cpunum];
+ proc_stat->n_irq[node] += proc_stat->p_irq[cpunum];
+ proc_stat->n_sirq[node] += proc_stat->p_sirq[cpunum];
+ proc_stat->n_steal[node] += proc_stat->p_steal[cpunum];
+ proc_stat->n_guest[node] += proc_stat->p_guest[cpunum];
+ }
+ }
+ }
+ }
+ /* always true after a full j pass: terminate the outer loop */
+ if (j == nbufindex)
+ break;
+ }
+ }
+
+ /*
+ * page 59739 34786
+ * Note: this has moved to /proc/vmstat in 2.6 kernels
+ */
+ strcpy(fmt, "page %u %u");
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp(fmt, bufindex[j], 5) == 0) {
+ sscanf((const char *)bufindex[j], fmt,
+ &proc_stat->page[0], &proc_stat->page[1]);
+ break;
+ }
+ }
+
+ /*
+ * swap 0 1
+ * Note: this has moved to /proc/vmstat in 2.6 kernels
+ */
+ strcpy(fmt, "swap %u %u");
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp(fmt, bufindex[j], 5) == 0) {
+ sscanf((const char *)bufindex[j], fmt,
+ &proc_stat->swap[0], &proc_stat->swap[1]);
+ break;
+ }
+ }
+
+ /*
+ * intr 32845463 24099228 2049 0 2 ....
+ * (just export the first number, which is total interrupts)
+ */
+ strcpy(fmt, "intr %llu");
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp(fmt, bufindex[j], 5) == 0) {
+ sscanf((const char *)bufindex[j], fmt, &proc_stat->intr);
+ break;
+ }
+ }
+
+ /*
+ * ctxt 1733480
+ */
+ strcpy(fmt, "ctxt %llu");
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp(fmt, bufindex[j], 5) == 0) {
+ sscanf((const char *)bufindex[j], fmt, &proc_stat->ctxt);
+ break;
+ }
+ }
+
+ /*
+ * btime 1733480
+ */
+ strcpy(fmt, "btime %lu");
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp(fmt, bufindex[j], 6) == 0) {
+ sscanf((const char *)bufindex[j], fmt, &proc_stat->btime);
+ break;
+ }
+ }
+
+ /*
+ * processes 2213
+ */
+ strcpy(fmt, "processes %lu");
+ for (j=0; j < nbufindex; j++) {
+ if (strncmp(fmt, bufindex[j], 10) == 0) {
+ sscanf((const char *)bufindex[j], fmt, &proc_stat->processes);
+ break;
+ }
+ }
+
+ /* success */
+ return 0;
+}
diff --git a/src/pmdas/linux/proc_stat.h b/src/pmdas/linux/proc_stat.h
new file mode 100644
index 0000000..78b2c09
--- /dev/null
+++ b/src/pmdas/linux/proc_stat.h
@@ -0,0 +1,65 @@
+/*
+ * Linux /proc/stat metrics cluster
+ *
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2008 Aconex. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Counters refreshed from /proc/stat (see refresh_proc_stat). */
+typedef struct {
+ /* aggregate CPU time counters, from the leading "cpu" line */
+ unsigned long long user;
+ unsigned long long sys;
+ unsigned long long nice;
+ unsigned long long idle;
+ unsigned long long wait;
+ unsigned long long irq;
+ unsigned long long sirq;
+ unsigned long long steal;
+ unsigned long long guest;
+ unsigned int ncpu; /* count of "cpuN" lines (min 1 for non-SMP) */
+ /* per-cpu */
+ unsigned long long *p_user;
+ unsigned long long *p_sys;
+ unsigned long long *p_nice;
+ unsigned long long *p_idle;
+ unsigned long long *p_wait;
+ unsigned long long *p_irq;
+ unsigned long long *p_sirq;
+ unsigned long long *p_steal;
+ unsigned long long *p_guest;
+ /* per-node (NUMA), aggregated from per-cpu on each refresh */
+ unsigned long long *n_user;
+ unsigned long long *n_sys;
+ unsigned long long *n_nice;
+ unsigned long long *n_idle;
+ unsigned long long *n_wait;
+ unsigned long long *n_irq;
+ unsigned long long *n_sirq;
+ unsigned long long *n_steal;
+ unsigned long long *n_guest;
+
+ unsigned int ndisk; /* NOTE(review): not touched by refresh_proc_stat */
+ unsigned int page[2]; /* unused in 2.6 now in /proc/vmstat */
+ unsigned int swap[2]; /* unused in 2.6 now in /proc/vmstat */
+ unsigned long long intr; /* total interrupts ("intr" line, first value) */
+ unsigned long long ctxt; /* context switches ("ctxt" line) */
+ unsigned long btime; /* boot time ("btime" line) */
+ unsigned long processes; /* forks since boot ("processes" line) */
+ pmdaIndom *cpu_indom;
+ unsigned int hz; /* kernel clock tick rate, sysconf(_SC_CLK_TCK) */
+} proc_stat_t;
+
+extern int refresh_proc_stat(proc_cpuinfo_t *, proc_stat_t *);
diff --git a/src/pmdas/linux/proc_sys_fs.c b/src/pmdas/linux/proc_sys_fs.c
new file mode 100644
index 0000000..021ae23
--- /dev/null
+++ b/src/pmdas/linux/proc_sys_fs.c
@@ -0,0 +1,80 @@
+/*
+ * Linux /proc/sys/fs metrics cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2003,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_sys_fs.h"
+
+/*
+ * Refresh virtual filesystem metrics from /proc/sys/fs/file-nr,
+ * /proc/sys/fs/inode-state and /proc/sys/fs/dentry-state.
+ *
+ * If any of the three files cannot be opened, a warning is printed
+ * (on the first refresh only) and the error is latched in errcode
+ * so the fetch path can report it.  Returns 0 on success, -1
+ * otherwise (details in proc_sys_fs->errcode).
+ */
+int
+refresh_proc_sys_fs(proc_sys_fs_t *proc_sys_fs)
+{
+ static int err_reported;
+ char buf[MAXPATHLEN];
+ FILE *filesp = NULL;
+ FILE *inodep = NULL;
+ FILE *dentryp = NULL;
+
+ memset(proc_sys_fs, 0, sizeof(proc_sys_fs_t));
+
+ if ((filesp = linux_statsfile("/proc/sys/fs/file-nr", buf, sizeof(buf))) == NULL ||
+ (inodep = linux_statsfile("/proc/sys/fs/inode-state", buf, sizeof(buf))) == NULL ||
+ (dentryp = linux_statsfile("/proc/sys/fs/dentry-state", buf, sizeof(buf))) == NULL) {
+ proc_sys_fs->errcode = -oserror();
+ if (err_reported == 0)
+ fprintf(stderr, "Warning: vfs metrics are not available : %s\n",
+ osstrerror());
+ }
+ else {
+ proc_sys_fs->errcode = 0;
+ /* any short parse marks the whole refresh as bad data */
+ if (fscanf(filesp, "%d %d %d",
+ &proc_sys_fs->fs_files_count,
+ &proc_sys_fs->fs_files_free,
+ &proc_sys_fs->fs_files_max) != 3)
+ proc_sys_fs->errcode = PM_ERR_VALUE;
+ if (fscanf(inodep, "%d %d",
+ &proc_sys_fs->fs_inodes_count,
+ &proc_sys_fs->fs_inodes_free) != 2)
+ proc_sys_fs->errcode = PM_ERR_VALUE;
+ if (fscanf(dentryp, "%d %d",
+ &proc_sys_fs->fs_dentry_count,
+ &proc_sys_fs->fs_dentry_free) != 2)
+ proc_sys_fs->errcode = PM_ERR_VALUE;
+#if PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA) {
+ if (proc_sys_fs->errcode == 0)
+ fprintf(stderr, "refresh_proc_sys_fs: found vfs metrics\n");
+ else
+ fprintf(stderr, "refresh_proc_sys_fs: botch! missing vfs metrics\n");
+ }
+#endif
+ }
+ if (filesp)
+ fclose(filesp);
+ if (inodep)
+ fclose(inodep);
+ if (dentryp)
+ fclose(dentryp);
+
+ /* only ever warn on the very first refresh */
+ if (!err_reported)
+ err_reported = 1;
+
+ if (proc_sys_fs->errcode == 0)
+ return 0;
+ return -1;
+}
diff --git a/src/pmdas/linux/proc_sys_fs.h b/src/pmdas/linux/proc_sys_fs.h
new file mode 100644
index 0000000..d1a1ebf
--- /dev/null
+++ b/src/pmdas/linux/proc_sys_fs.h
@@ -0,0 +1,32 @@
+/*
+ * Linux /proc/sys/fs metrics cluster
+ *
+ * Copyright (c) 2003,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Values scanned by refresh_proc_sys_fs from /proc/sys/fs files. */
+typedef struct {
+ int errcode; /* error from previous refresh */
+ int fs_files_count; /* file-nr field 1 */
+ int fs_files_free; /* file-nr field 2 */
+ int fs_files_max; /* file-nr field 3 */
+ int fs_inodes_count; /* inode-state field 1 */
+ int fs_inodes_free; /* inode-state field 2 */
+ int fs_dentry_count; /* dentry-state field 1 */
+ int fs_dentry_free; /* dentry-state field 2 */
+} proc_sys_fs_t;
+
+extern int refresh_proc_sys_fs(proc_sys_fs_t *);
diff --git a/src/pmdas/linux/proc_uptime.c b/src/pmdas/linux/proc_uptime.c
new file mode 100644
index 0000000..0d56bfd
--- /dev/null
+++ b/src/pmdas/linux/proc_uptime.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) Red Hat 2014.
+ * Copyright (c) International Business Machines Corp., 2002
+ * This code contributed by Mike Mason <mmlnx@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <fcntl.h>
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_uptime.h"
+
+/*
+ * Refresh system uptime and idle time from /proc/uptime.
+ *
+ * Both fields are scanned as fractional seconds and truncated to
+ * whole seconds.  Returns 0 on success, else a negative error code.
+ */
+int
+refresh_proc_uptime(proc_uptime_t *proc_uptime)
+{
+ char buf[MAXPATHLEN];
+ int fd, n;
+ float uptime = 0.0, idletime = 0.0;
+
+ memset(proc_uptime, 0, sizeof(proc_uptime_t));
+ snprintf(buf, sizeof(buf), "%s/proc/uptime", linux_statspath);
+ if ((fd = open(buf, O_RDONLY)) < 0)
+ return -oserror();
+
+ n = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (n < 0)
+ return -oserror();
+ else if (n > 0)
+ n--; /* drop the final character (normally the trailing newline) */
+ buf[n] = '\0';
+
+ sscanf((const char *)buf, "%f %f", &uptime, &idletime);
+ proc_uptime->uptime = (unsigned long) uptime; /* truncate to seconds */
+ proc_uptime->idletime = (unsigned long) idletime;
+ return 0;
+}
diff --git a/src/pmdas/linux/proc_uptime.h b/src/pmdas/linux/proc_uptime.h
new file mode 100644
index 0000000..f4a658f
--- /dev/null
+++ b/src/pmdas/linux/proc_uptime.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * This code contributed by Mike Mason <mmlnx@us.ibm.com>
+ */
+
+/* Values from /proc/uptime, truncated to whole seconds. */
+typedef struct {
+ unsigned long uptime; /* seconds since boot */
+ unsigned long idletime; /* seconds spent idle */
+} proc_uptime_t;
+
+extern int refresh_proc_uptime(proc_uptime_t *);
+
diff --git a/src/pmdas/linux/proc_vmstat.c b/src/pmdas/linux/proc_vmstat.c
new file mode 100644
index 0000000..f56c3c3
--- /dev/null
+++ b/src/pmdas/linux/proc_vmstat.c
@@ -0,0 +1,299 @@
+/*
+ * Linux /proc/vmstat metrics cluster
+ *
+ * Copyright (c) 2013-2014 Red Hat.
+ * Copyright (c) 2007,2011 Aconex. All Rights Reserved.
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <ctype.h>
+#include "pmapi.h"
+#include "pmda.h"
+#include "indom.h"
+#include "proc_vmstat.h"
+
+static struct {
+ const char *field;
+ __uint64_t *offset;
+} vmstat_fields[] = {
+ /* sorted by name to make maintenance easier */
+ { .field = "allocstall",
+ .offset = &_pm_proc_vmstat.allocstall },
+ { .field = "compact_blocks_moved",
+ .offset = &_pm_proc_vmstat.compact_blocks_moved },
+ { .field = "compact_fail",
+ .offset = &_pm_proc_vmstat.compact_fail },
+ { .field = "compact_pagemigrate_failed",
+ .offset = &_pm_proc_vmstat.compact_pagemigrate_failed },
+ { .field = "compact_pages_moved",
+ .offset = &_pm_proc_vmstat.compact_pages_moved },
+ { .field = "compact_stall",
+ .offset = &_pm_proc_vmstat.compact_stall },
+ { .field = "compact_success",
+ .offset = &_pm_proc_vmstat.compact_success },
+ { .field = "htlb_buddy_alloc_fail",
+ .offset = &_pm_proc_vmstat.htlb_buddy_alloc_fail },
+ { .field = "htlb_buddy_alloc_success",
+ .offset = &_pm_proc_vmstat.htlb_buddy_alloc_success },
+ { .field = "kswapd_inodesteal",
+ .offset = &_pm_proc_vmstat.kswapd_inodesteal },
+ { .field = "kswapd_low_wmark_hit_quickly",
+ .offset = &_pm_proc_vmstat.kswapd_low_wmark_hit_quickly },
+ { .field = "kswapd_high_wmark_hit_quickly",
+ .offset = &_pm_proc_vmstat.kswapd_high_wmark_hit_quickly },
+ { .field = "kswapd_skip_congestion_wait",
+ .offset = &_pm_proc_vmstat.kswapd_skip_congestion_wait },
+ { .field = "kswapd_steal",
+ .offset = &_pm_proc_vmstat.kswapd_steal },
+ { .field = "nr_active_anon",
+ .offset = &_pm_proc_vmstat.nr_active_anon },
+ { .field = "nr_active_file",
+ .offset = &_pm_proc_vmstat.nr_active_file },
+ { .field = "nr_anon_pages",
+ .offset = &_pm_proc_vmstat.nr_anon_pages },
+ { .field = "nr_anon_transparent_hugepages",
+ .offset = &_pm_proc_vmstat.nr_anon_transparent_hugepages },
+ { .field = "nr_bounce",
+ .offset = &_pm_proc_vmstat.nr_bounce },
+ { .field = "nr_dirty",
+ .offset = &_pm_proc_vmstat.nr_dirty },
+ { .field = "nr_dirtied",
+ .offset = &_pm_proc_vmstat.nr_dirtied },
+ { .field = "nr_dirty_threshold",
+ .offset = &_pm_proc_vmstat.nr_dirty_threshold },
+ { .field = "nr_dirty_background_threshold",
+ .offset = &_pm_proc_vmstat.nr_dirty_background_threshold },
+ { .field = "nr_file_pages",
+ .offset = &_pm_proc_vmstat.nr_file_pages },
+ { .field = "nr_free_pages",
+ .offset = &_pm_proc_vmstat.nr_free_pages },
+ { .field = "nr_inactive_anon",
+ .offset = &_pm_proc_vmstat.nr_inactive_anon },
+ { .field = "nr_inactive_file",
+ .offset = &_pm_proc_vmstat.nr_inactive_file },
+ { .field = "nr_isolated_anon",
+ .offset = &_pm_proc_vmstat.nr_isolated_anon },
+ { .field = "nr_isolated_file",
+ .offset = &_pm_proc_vmstat.nr_isolated_file },
+ { .field = "nr_kernel_stack",
+ .offset = &_pm_proc_vmstat.nr_kernel_stack },
+ { .field = "nr_mapped",
+ .offset = &_pm_proc_vmstat.nr_mapped },
+ { .field = "nr_mlock",
+ .offset = &_pm_proc_vmstat.nr_mlock },
+ { .field = "nr_page_table_pages",
+ .offset = &_pm_proc_vmstat.nr_page_table_pages },
+ { .field = "nr_shmem",
+ .offset = &_pm_proc_vmstat.nr_shmem },
+ { .field = "nr_slab_reclaimable",
+ .offset = &_pm_proc_vmstat.nr_slab_reclaimable },
+ { .field = "nr_slab_unreclaimable",
+ .offset = &_pm_proc_vmstat.nr_slab_unreclaimable },
+ { .field = "nr_slab",
+ .offset = &_pm_proc_vmstat.nr_slab }, /* not in later kernels */
+ { .field = "nr_unevictable",
+ .offset = &_pm_proc_vmstat.nr_unevictable },
+ { .field = "nr_unstable",
+ .offset = &_pm_proc_vmstat.nr_unstable },
+ { .field = "nr_vmscan_write",
+ .offset = &_pm_proc_vmstat.nr_vmscan_write },
+ { .field = "nr_writeback",
+ .offset = &_pm_proc_vmstat.nr_writeback },
+ { .field = "nr_writeback_temp",
+ .offset = &_pm_proc_vmstat.nr_writeback_temp },
+ { .field = "nr_written",
+ .offset = &_pm_proc_vmstat.nr_written },
+ { .field = "numa_hit",
+ .offset = &_pm_proc_vmstat.numa_hit },
+ { .field = "numa_miss",
+ .offset = &_pm_proc_vmstat.numa_miss },
+ { .field = "numa_foreign",
+ .offset = &_pm_proc_vmstat.numa_foreign },
+ { .field = "numa_interleave",
+ .offset = &_pm_proc_vmstat.numa_interleave },
+ { .field = "numa_local",
+ .offset = &_pm_proc_vmstat.numa_local },
+ { .field = "numa_other",
+ .offset = &_pm_proc_vmstat.numa_other },
+ { .field = "pageoutrun",
+ .offset = &_pm_proc_vmstat.pageoutrun },
+ { .field = "pgactivate",
+ .offset = &_pm_proc_vmstat.pgactivate },
+ { .field = "pgalloc_dma",
+ .offset = &_pm_proc_vmstat.pgalloc_dma },
+ { .field = "pgalloc_dma32",
+ .offset = &_pm_proc_vmstat.pgalloc_dma32 },
+ { .field = "pgalloc_high",
+ .offset = &_pm_proc_vmstat.pgalloc_high },
+ { .field = "pgalloc_movable",
+ .offset = &_pm_proc_vmstat.pgalloc_movable },
+ { .field = "pgalloc_normal",
+ .offset = &_pm_proc_vmstat.pgalloc_normal },
+ { .field = "pgdeactivate",
+ .offset = &_pm_proc_vmstat.pgdeactivate },
+ { .field = "pgfault",
+ .offset = &_pm_proc_vmstat.pgfault },
+ { .field = "pgfree",
+ .offset = &_pm_proc_vmstat.pgfree },
+ { .field = "pginodesteal",
+ .offset = &_pm_proc_vmstat.pginodesteal },
+ { .field = "pgmajfault",
+ .offset = &_pm_proc_vmstat.pgmajfault },
+ { .field = "pgpgin",
+ .offset = &_pm_proc_vmstat.pgpgin },
+ { .field = "pgpgout",
+ .offset = &_pm_proc_vmstat.pgpgout },
+ { .field = "pgrefill_dma",
+ .offset = &_pm_proc_vmstat.pgrefill_dma },
+ { .field = "pgrefill_dma32",
+ .offset = &_pm_proc_vmstat.pgrefill_dma32 },
+ { .field = "pgrefill_high",
+ .offset = &_pm_proc_vmstat.pgrefill_high },
+ { .field = "pgrefill_movable",
+ .offset = &_pm_proc_vmstat.pgrefill_movable },
+ { .field = "pgrefill_normal",
+ .offset = &_pm_proc_vmstat.pgrefill_normal },
+ { .field = "pgrotated",
+ .offset = &_pm_proc_vmstat.pgrotated },
+ { .field = "pgscan_direct_dma",
+ .offset = &_pm_proc_vmstat.pgscan_direct_dma },
+ { .field = "pgscan_direct_dma32",
+ .offset = &_pm_proc_vmstat.pgscan_direct_dma32 },
+ { .field = "pgscan_direct_high",
+ .offset = &_pm_proc_vmstat.pgscan_direct_high },
+ { .field = "pgscan_direct_movable",
+ .offset = &_pm_proc_vmstat.pgscan_direct_movable },
+ { .field = "pgscan_direct_normal",
+ .offset = &_pm_proc_vmstat.pgscan_direct_normal },
+ { .field = "pgscan_kswapd_dma",
+ .offset = &_pm_proc_vmstat.pgscan_kswapd_dma },
+ { .field = "pgscan_kswapd_dma32",
+ .offset = &_pm_proc_vmstat.pgscan_kswapd_dma32 },
+ { .field = "pgscan_kswapd_high",
+ .offset = &_pm_proc_vmstat.pgscan_kswapd_high },
+ { .field = "pgscan_kswapd_movable",
+ .offset = &_pm_proc_vmstat.pgscan_kswapd_movable },
+ { .field = "pgscan_kswapd_normal",
+ .offset = &_pm_proc_vmstat.pgscan_kswapd_normal },
+ { .field = "pgsteal_dma",
+ .offset = &_pm_proc_vmstat.pgsteal_dma },
+ { .field = "pgsteal_dma32",
+ .offset = &_pm_proc_vmstat.pgsteal_dma32 },
+ { .field = "pgsteal_high",
+ .offset = &_pm_proc_vmstat.pgsteal_high },
+ { .field = "pgsteal_movable",
+ .offset = &_pm_proc_vmstat.pgsteal_movable },
+ { .field = "pgsteal_normal",
+ .offset = &_pm_proc_vmstat.pgsteal_normal },
+ { .field = "pswpin",
+ .offset = &_pm_proc_vmstat.pswpin },
+ { .field = "pswpout",
+ .offset = &_pm_proc_vmstat.pswpout },
+ { .field = "slabs_scanned",
+ .offset = &_pm_proc_vmstat.slabs_scanned },
+ { .field = "thp_collapse_alloc",
+ .offset = &_pm_proc_vmstat.thp_collapse_alloc },
+ { .field = "thp_collapse_alloc_failed",
+ .offset = &_pm_proc_vmstat.thp_collapse_alloc_failed },
+ { .field = "thp_fault_alloc",
+ .offset = &_pm_proc_vmstat.thp_fault_alloc },
+ { .field = "thp_fault_fallback",
+ .offset = &_pm_proc_vmstat.thp_fault_fallback },
+ { .field = "thp_split",
+ .offset = &_pm_proc_vmstat.thp_split },
+ { .field = "unevictable_pgs_cleared",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_cleared },
+ { .field = "unevictable_pgs_culled",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_culled },
+ { .field = "unevictable_pgs_mlocked",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_mlocked },
+ { .field = "unevictable_pgs_mlockfreed",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_mlockfreed },
+ { .field = "unevictable_pgs_munlocked",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_munlocked },
+ { .field = "unevictable_pgs_rescued",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_rescued },
+ { .field = "unevictable_pgs_scanned",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_scanned },
+ { .field = "unevictable_pgs_stranded",
+ .offset = &_pm_proc_vmstat.unevictable_pgs_stranded },
+ { .field = "zone_reclaim_failed",
+ .offset = &_pm_proc_vmstat.zone_reclaim_failed },
+
+ { .field = NULL, .offset = NULL }
+};
+
+/*
+ * Translate entry ii of vmstat_fields[] into a pointer to the matching
+ * field inside an arbitrary proc_vmstat_t (pp).  The table records each
+ * field's address within the static _pm_proc_vmstat, so subtracting that
+ * base address yields a byte offset that is then re-applied to pp.
+ */
+#define VMSTAT_OFFSET(ii, pp) (int64_t *)((char *)pp + \
+    (__psint_t)vmstat_fields[ii].offset - (__psint_t)&_pm_proc_vmstat)
+
+/*
+ * One-time setup for the vmstat cluster: record whether /proc/vmstat is
+ * readable so the swap.* metrics report sensible values even before the
+ * first mem.vmstat fetch.
+ */
+void
+proc_vmstat_init(void)
+{
+    char buf[1024];
+
+    /*
+     * The swap metrics moved from /proc/stat to /proc/vmstat early in 2.6.
+     * In addition, the swap operation count was removed; the fetch routine
+     * needs to deal with these quirks and return something sensible based
+     * (initially) on whether the vmstat file exists.
+     *
+     * We'll re-evaluate this on each fetch of the mem.vmstat metrics, but
+     * that is not a problem. This routine makes sure any swap.xxx metric
+     * fetch without a preceding mem.vmstat fetch has the correct state.
+     */
+    /* NOTE(review): linux_statspath is presumably "" in production and a
+     * test root under QA - confirm against pmda.c initialisation. */
+    snprintf(buf, sizeof(buf), "%s/proc/vmstat", linux_statspath);
+    _pm_have_proc_vmstat = (access(buf, R_OK) == 0);
+}
+
+/*
+ * Parse /proc/vmstat into *proc_vmstat.  Every field known to the
+ * vmstat_fields[] table is first preset to -1 ("no value available");
+ * fields present in the kernel's file overwrite that marker.
+ * Returns 0 on success, or a negated errno if the file cannot be opened.
+ */
+int
+refresh_proc_vmstat(proc_vmstat_t *proc_vmstat)
+{
+    char buf[1024];
+    char *bufp;
+    int64_t *p;
+    int i;
+    FILE *fp;
+
+    /* preset all known fields to the "no value available" marker */
+    for (i = 0; vmstat_fields[i].field != NULL; i++) {
+	p = VMSTAT_OFFSET(i, proc_vmstat);
+	*p = -1;
+    }
+
+    if ((fp = linux_statsfile("/proc/vmstat", buf, sizeof(buf))) == NULL)
+	return -oserror();
+
+    _pm_have_proc_vmstat = 1;
+
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+	/* each line is "<name> <value>"; split at the first space */
+	if ((bufp = strchr(buf, ' ')) == NULL)
+	    continue;
+	*bufp = '\0';
+	for (i = 0; vmstat_fields[i].field != NULL; i++) {
+	    if (strcmp(buf, vmstat_fields[i].field) != 0)
+		continue;
+	    p = VMSTAT_OFFSET(i, proc_vmstat);
+	    /* skip to the first digit, then parse the counter value */
+	    for (bufp++; *bufp; bufp++) {
+		if (isdigit((int)*bufp)) {
+		    sscanf(bufp, "%llu", (unsigned long long *)p);
+		    break;
+		}
+	    }
+	    break;	/* field names are unique - stop searching the table */
+	}
+    }
+    fclose(fp);
+
+    /*
+     * nr_slab was split into reclaimable+unreclaimable in 2.6.18;
+     * synthesise the old aggregate, but only when both halves were
+     * actually present - summing two -1 markers would yield -2 and
+     * no longer look like "no value available".
+     */
+    if (proc_vmstat->nr_slab == -1 &&
+	proc_vmstat->nr_slab_reclaimable != -1 &&
+	proc_vmstat->nr_slab_unreclaimable != -1)
+	proc_vmstat->nr_slab = proc_vmstat->nr_slab_reclaimable +
+			       proc_vmstat->nr_slab_unreclaimable;
+    return 0;
+}
diff --git a/src/pmdas/linux/proc_vmstat.h b/src/pmdas/linux/proc_vmstat.h
new file mode 100644
index 0000000..adeac39
--- /dev/null
+++ b/src/pmdas/linux/proc_vmstat.h
@@ -0,0 +1,131 @@
+/*
+ * Linux /proc/vmstat metrics cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2007,2011 Aconex. All Rights Reserved.
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/*
+ * All fields in /proc/vmstat for 2.6.x kernels.  One counter per line in
+ * the kernel file; fields absent from a given kernel are reported as -1
+ * ("no value available") by refresh_proc_vmstat().
+ */
+typedef struct {
+	/* sorted by name to make maintenance easier (a few entries sit
+	 * slightly out of strict order) */
+	__uint64_t allocstall;
+	__uint64_t compact_blocks_moved;
+	__uint64_t compact_fail;
+	__uint64_t compact_pagemigrate_failed;
+	__uint64_t compact_pages_moved;
+	__uint64_t compact_stall;
+	__uint64_t compact_success;
+	__uint64_t htlb_buddy_alloc_fail;
+	__uint64_t htlb_buddy_alloc_success;
+	__uint64_t kswapd_high_wmark_hit_quickly;
+	__uint64_t kswapd_inodesteal;
+	__uint64_t kswapd_low_wmark_hit_quickly;
+	__uint64_t kswapd_skip_congestion_wait;
+	__uint64_t kswapd_steal;
+	__uint64_t nr_active_anon;
+	__uint64_t nr_active_file;
+	__uint64_t nr_anon_pages;
+	__uint64_t nr_anon_transparent_hugepages;
+	__uint64_t nr_bounce;
+	__uint64_t nr_dirtied;
+	__uint64_t nr_dirty;
+	__uint64_t nr_dirty_threshold;
+	__uint64_t nr_dirty_background_threshold;
+	__uint64_t nr_file_pages;
+	__uint64_t nr_free_pages;
+	__uint64_t nr_inactive_anon;
+	__uint64_t nr_inactive_file;
+	__uint64_t nr_isolated_anon;
+	__uint64_t nr_isolated_file;
+	__uint64_t nr_kernel_stack;
+	__uint64_t nr_mapped;
+	__uint64_t nr_mlock;
+	__uint64_t nr_page_table_pages;
+	__uint64_t nr_shmem;
+	__uint64_t nr_slab;	/* not in later kernels - synthesised from
+				 * the reclaimable + unreclaimable pair */
+	__uint64_t nr_slab_reclaimable;
+	__uint64_t nr_slab_unreclaimable;
+	__uint64_t nr_unevictable;
+	__uint64_t nr_unstable;
+	__uint64_t nr_vmscan_write;
+	__uint64_t nr_writeback;
+	__uint64_t nr_writeback_temp;
+	__uint64_t nr_written;
+	__uint64_t numa_foreign;
+	__uint64_t numa_hit;
+	__uint64_t numa_interleave;
+	__uint64_t numa_local;
+	__uint64_t numa_miss;
+	__uint64_t numa_other;
+	__uint64_t pageoutrun;
+	__uint64_t pgactivate;
+	__uint64_t pgalloc_dma;
+	__uint64_t pgalloc_dma32;
+	__uint64_t pgalloc_movable;
+	__uint64_t pgalloc_high;
+	__uint64_t pgalloc_normal;
+	__uint64_t pgdeactivate;
+	__uint64_t pgfault;
+	__uint64_t pgfree;
+	__uint64_t pginodesteal;
+	__uint64_t pgmajfault;
+	__uint64_t pgpgin;
+	__uint64_t pgpgout;
+	__uint64_t pgrefill_dma;
+	__uint64_t pgrefill_dma32;
+	__uint64_t pgrefill_high;
+	__uint64_t pgrefill_movable;
+	__uint64_t pgrefill_normal;
+	__uint64_t pgrotated;
+	__uint64_t pgscan_direct_dma;
+	__uint64_t pgscan_direct_dma32;
+	__uint64_t pgscan_direct_high;
+	__uint64_t pgscan_direct_movable;
+	__uint64_t pgscan_direct_normal;
+	__uint64_t pgscan_kswapd_dma;
+	__uint64_t pgscan_kswapd_dma32;
+	__uint64_t pgscan_kswapd_high;
+	__uint64_t pgscan_kswapd_movable;
+	__uint64_t pgscan_kswapd_normal;
+	__uint64_t pgsteal_dma;
+	__uint64_t pgsteal_dma32;
+	__uint64_t pgsteal_high;
+	__uint64_t pgsteal_movable;
+	__uint64_t pgsteal_normal;
+	__uint64_t pswpin;
+	__uint64_t pswpout;
+	__uint64_t slabs_scanned;
+	__uint64_t thp_fault_alloc;
+	__uint64_t thp_fault_fallback;
+	__uint64_t thp_collapse_alloc;
+	__uint64_t thp_collapse_alloc_failed;
+	__uint64_t thp_split;
+	__uint64_t unevictable_pgs_cleared;
+	__uint64_t unevictable_pgs_culled;
+	__uint64_t unevictable_pgs_mlocked;
+	__uint64_t unevictable_pgs_mlockfreed;
+	__uint64_t unevictable_pgs_munlocked;
+	__uint64_t unevictable_pgs_rescued;
+	__uint64_t unevictable_pgs_scanned;
+	__uint64_t unevictable_pgs_stranded;
+	__uint64_t zone_reclaim_failed;
+} proc_vmstat_t;
+
+/* one-time setup (see proc_vmstat.c) */
+extern void proc_vmstat_init(void);
+/* re-read /proc/vmstat; returns 0 or negated errno */
+extern int refresh_proc_vmstat(proc_vmstat_t *);
+/* non-zero when /proc/vmstat exists and is readable */
+extern int _pm_have_proc_vmstat;
+extern proc_vmstat_t _pm_proc_vmstat;
+
+
diff --git a/src/pmdas/linux/root_linux b/src/pmdas/linux/root_linux
new file mode 100644
index 0000000..d66da28
--- /dev/null
+++ b/src/pmdas/linux/root_linux
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2000,2004,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ * Portions Copyright (c) International Business Machines Corp., 2002
+ * Portions Copyright (c) 2007-2009 Aconex. All Rights Reserved.
+ * Portions Copyright (c) 2013 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+root {
+ hinv
+ kernel
+ mem
+ swap
+ network
+ disk
+ filesys
+ swapdev
+ rpc
+ nfs
+ nfs3
+ nfs4
+ pmda
+ ipc
+ vfs
+ tmpfs
+ sysfs
+}
+
+hinv {
+ physmem 60:1:9
+ pagesize 60:1:11
+ ncpu 60:0:32
+ ndisk 60:0:33
+ nfilesys 60:5:0
+ ninterface 60:3:27
+ nlv 60:52:1
+ nnode 60:0:19
+ machine 60:18:7
+ map
+ cpu
+}
+
+hinv.map {
+ scsi 60:15:0
+ cpu_num 60:18:6
+ cpu_node 60:18:8
+ lvname 60:52:0
+ dmname 60:54:13
+}
+
+hinv.cpu {
+ clock 60:18:0
+ vendor 60:18:1
+ model 60:18:2
+ stepping 60:18:3
+ cache 60:18:4
+ bogomips 60:18:5
+ model_name 60:18:9
+ flags 60:18:10
+ cache_alignment 60:18:11
+}
+
+kernel {
+ all
+ percpu
+ pernode
+ uname
+}
+
+kernel.all {
+ cpu
+ load 60:2:0
+ intr 60:0:12
+ pswitch 60:0:13
+ sysfork 60:0:14
+ hz 60:0:48
+ uptime 60:26:0
+ idletime 60:26:1
+ nusers 60:25:0
+ lastpid 60:2:1
+ runnable 60:2:2
+ nprocs 60:2:3
+ interrupts
+}
+
+kernel.all.interrupts {
+ errors 60:4:3
+}
+
+kernel.all.cpu {
+ user 60:0:20
+ nice 60:0:21
+ sys 60:0:22
+ idle 60:0:23
+ intr 60:0:34
+ wait
+ irq
+ steal 60:0:55
+ guest 60:0:60
+ vuser 60:0:78
+}
+
+kernel.all.cpu.wait {
+ total 60:0:35
+}
+
+kernel.all.cpu.irq {
+ soft 60:0:53
+ hard 60:0:54
+}
+
+kernel.percpu {
+ interrupts 60:*:*
+ cpu
+}
+
+kernel.percpu.cpu {
+ user 60:0:0
+ nice 60:0:1
+ sys 60:0:2
+ idle 60:0:3
+ intr 60:0:31
+ wait
+ irq
+ steal 60:0:58
+ guest 60:0:61
+ vuser 60:0:76
+}
+
+kernel.percpu.cpu.wait {
+ total 60:0:30
+}
+
+kernel.percpu.cpu.irq {
+ soft 60:0:56
+ hard 60:0:57
+}
+
+kernel.pernode {
+ cpu
+}
+
+kernel.pernode.cpu {
+ user 60:0:62
+ nice 60:0:63
+ sys 60:0:64
+ idle 60:0:65
+ intr 60:0:66
+ wait
+ irq
+ steal 60:0:67
+ guest 60:0:68
+ vuser 60:0:77
+}
+
+kernel.pernode.cpu.wait {
+ total 60:0:69
+}
+
+kernel.pernode.cpu.irq {
+ soft 60:0:70
+ hard 60:0:71
+}
+
+kernel.uname {
+ release 60:12:0
+ version 60:12:1
+ sysname 60:12:2
+ machine 60:12:3
+ nodename 60:12:4
+ distro 60:12:7
+}
+
+ipc {
+ sem
+ msg
+ shm
+}
+
+ipc.sem {
+ max_semmap 60:21:0
+ max_semid 60:21:1
+ max_sem 60:21:2
+ num_undo 60:21:3
+ max_perid 60:21:4
+ max_ops 60:21:5
+ max_undoent 60:21:6
+ sz_semundo 60:21:7
+ max_semval 60:21:8
+ max_exit 60:21:9
+}
+
+ipc.msg {
+ sz_pool 60:22:0
+ mapent 60:22:1
+ max_msgsz 60:22:2
+ max_defmsgq 60:22:3
+ max_msgqid 60:22:4
+ max_msgseg 60:22:5
+ num_smsghdr 60:22:6
+ max_seg 60:22:7
+}
+
+ipc.shm {
+ max_segsz 60:23:0
+ min_segsz 60:23:1
+ max_seg 60:23:2
+ max_segproc 60:23:3
+ max_shmsys 60:23:4
+}
+
+pmda {
+ uname 60:12:5
+ version 60:12:6
+}
+
+disk {
+ dev
+ all
+ partitions
+ dm
+}
+
+disk.dev {
+ read 60:0:4
+ write 60:0:5
+ total 60:0:28
+ blkread 60:0:6
+ blkwrite 60:0:7
+ blktotal 60:0:36
+ read_bytes 60:0:38
+ write_bytes 60:0:39
+ total_bytes 60:0:40
+ read_merge 60:0:49
+ write_merge 60:0:50
+ avactive 60:0:46
+ read_rawactive 60:0:72
+ write_rawactive 60:0:73
+ aveq 60:0:47
+ scheduler 60:0:59
+}
+
+disk.all {
+ read 60:0:24
+ write 60:0:25
+ total 60:0:29
+ blkread 60:0:26
+ blkwrite 60:0:27
+ blktotal 60:0:37
+ read_bytes 60:0:41
+ write_bytes 60:0:42
+ total_bytes 60:0:43
+ read_merge 60:0:51
+ write_merge 60:0:52
+ avactive 60:0:44
+ read_rawactive 60:0:74
+ write_rawactive 60:0:75
+ aveq 60:0:45
+}
+
+disk.dm {
+ read 60:54:0
+ write 60:54:1
+ total 60:54:2
+ blkread 60:54:3
+ blkwrite 60:54:4
+ blktotal 60:54:5
+ read_bytes 60:54:6
+ write_bytes 60:54:7
+ total_bytes 60:54:8
+ read_merge 60:54:9
+ write_merge 60:54:10
+ avactive 60:54:11
+ aveq 60:54:12
+ read_rawactive 60:54:14
+ write_rawactive 60:54:15
+}
+
+disk.partitions {
+ read 60:10:0
+ write 60:10:1
+ total 60:10:2
+ blkread 60:10:3
+ blkwrite 60:10:4
+ blktotal 60:10:5
+ read_bytes 60:10:6
+ write_bytes 60:10:7
+ total_bytes 60:10:8
+}
+
+mem {
+ physmem 60:1:0
+ freemem 60:1:10
+ util
+ numa
+ slabinfo
+ vmstat
+}
+
+mem.util {
+ used 60:1:1
+ free 60:1:2
+ shared 60:1:3
+ bufmem 60:1:4
+ cached 60:1:5
+ other 60:1:12
+ swapCached 60:1:13
+ active 60:1:14
+ inactive 60:1:15
+ highTotal 60:1:16
+ highFree 60:1:17
+ lowTotal 60:1:18
+ lowFree 60:1:19
+ swapTotal 60:1:20
+ swapFree 60:1:21
+ dirty 60:1:22
+ writeback 60:1:23
+ mapped 60:1:24
+ slab 60:1:25
+ committed_AS 60:1:26
+ pageTables 60:1:27
+ reverseMaps 60:1:28
+ cache_clean 60:1:29
+ anonpages 60:1:30
+ commitLimit 60:1:31
+ bounce 60:1:32
+ NFS_Unstable 60:1:33
+ slabReclaimable 60:1:34
+ slabUnreclaimable 60:1:35
+ active_anon 60:1:36
+ inactive_anon 60:1:37
+ active_file 60:1:38
+ inactive_file 60:1:39
+ unevictable 60:1:40
+ mlocked 60:1:41
+ shmem 60:1:42
+ kernelStack 60:1:43
+ hugepagesTotal 60:1:44
+ hugepagesFree 60:1:45
+ hugepagesRsvd 60:1:46
+ hugepagesSurp 60:1:47
+ directMap4k 60:1:48
+ directMap2M 60:1:49
+ vmallocTotal 60:1:50
+ vmallocUsed 60:1:51
+ vmallocChunk 60:1:52
+ mmap_copy 60:1:53
+ quicklists 60:1:54
+ corrupthardware 60:1:55
+ anonhugepages 60:1:56
+ directMap1G 60:1:57
+ available 60:1:58
+}
+
+mem.numa {
+ util
+ alloc
+}
+
+mem.numa.util {
+ total 60:36:0
+ free 60:36:1
+ used 60:36:2
+ active 60:36:3
+ inactive 60:36:4
+ active_anon 60:36:5
+ inactive_anon 60:36:6
+ active_file 60:36:7
+ inactive_file 60:36:8
+ highTotal 60:36:9
+ highFree 60:36:10
+ lowTotal 60:36:11
+ lowFree 60:36:12
+ unevictable 60:36:13
+ mlocked 60:36:14
+ dirty 60:36:15
+ writeback 60:36:16
+ filePages 60:36:17
+ mapped 60:36:18
+ anonpages 60:36:19
+ shmem 60:36:20
+ kernelStack 60:36:21
+ pageTables 60:36:22
+ NFS_Unstable 60:36:23
+ bounce 60:36:24
+ writebackTmp 60:36:25
+ slab 60:36:26
+ slabReclaimable 60:36:27
+ slabUnreclaimable 60:36:28
+ hugepagesTotal 60:36:29
+ hugepagesFree 60:36:30
+ hugepagesSurp 60:36:31
+}
+
+mem.numa.alloc {
+ hit 60:36:32
+ miss 60:36:33
+ foreign 60:36:34
+ interleave_hit 60:36:35
+ local_node 60:36:36
+ other_node 60:36:37
+}
+
+swap {
+ pagesin 60:0:8
+ pagesout 60:0:9
+ in 60:0:10
+ out 60:0:11
+ free 60:1:8
+ length 60:1:6
+ used 60:1:7
+}
+
+network {
+ interface
+ sockstat
+ ip
+ icmp
+ icmpmsg
+ tcp
+ udp
+ udplite
+ tcpconn
+}
+
+network.interface {
+ collisions 60:3:13
+ in
+ out
+ total
+ mtu 60:3:21
+ speed 60:3:22
+ baudrate 60:3:23
+ duplex 60:3:24
+ up 60:3:25
+ running 60:3:26
+ inet_addr 60:33:0
+ ipv6_addr 60:33:1
+ ipv6_scope 60:33:2
+ hw_addr 60:33:3
+}
+
+network.interface.in {
+ bytes 60:3:0
+ packets 60:3:1
+ errors 60:3:2
+ drops 60:3:3
+ fifo 60:3:4
+ frame 60:3:5
+ compressed 60:3:6
+ mcasts 60:3:7
+}
+
+network.interface.out {
+ bytes 60:3:8
+ packets 60:3:9
+ errors 60:3:10
+ drops 60:3:11
+ fifo 60:3:12
+ carrier 60:3:14
+ compressed 60:3:15
+}
+
+network.interface.total {
+ bytes 60:3:16
+ packets 60:3:17
+ errors 60:3:18
+ drops 60:3:19
+ mcasts 60:3:20
+}
+
+network.sockstat {
+ tcp
+ udp
+ raw
+}
+
+network.sockstat.tcp {
+ inuse 60:11:0
+ highest 60:11:1
+ util 60:11:2
+}
+
+network.sockstat.udp {
+ inuse 60:11:3
+ highest 60:11:4
+ util 60:11:5
+}
+
+network.sockstat.raw {
+ inuse 60:11:6
+ highest 60:11:7
+ util 60:11:8
+}
+
+network.ip {
+ forwarding 60:14:00
+ defaultttl 60:14:01
+ inreceives 60:14:02
+ inhdrerrors 60:14:03
+ inaddrerrors 60:14:04
+ forwdatagrams 60:14:05
+ inunknownprotos 60:14:06
+ indiscards 60:14:07
+ indelivers 60:14:08
+ outrequests 60:14:09
+ outdiscards 60:14:10
+ outnoroutes 60:14:11
+ reasmtimeout 60:14:12
+ reasmreqds 60:14:13
+ reasmoks 60:14:14
+ reasmfails 60:14:15
+ fragoks 60:14:16
+ fragfails 60:14:17
+ fragcreates 60:14:18
+ innoroutes 60:53:00
+ intruncatedpkts 60:53:01
+ inmcastpkts 60:53:02
+ outmcastpkts 60:53:03
+ inbcastpkts 60:53:04
+ outbcastpkts 60:53:05
+ inoctets 60:53:06
+ outoctets 60:53:07
+ inmcastoctets 60:53:08
+ outmcastoctets 60:53:09
+ inbcastoctets 60:53:10
+ outbcastoctets 60:53:11
+ csumerrors 60:53:12
+ noectpkts 60:53:13
+ ect1pkts 60:53:14
+ ect0pkts 60:53:15
+ cepkts 60:53:16
+}
+
+network.icmp {
+ inmsgs 60:14:20
+ inerrors 60:14:21
+ indestunreachs 60:14:22
+ intimeexcds 60:14:23
+ inparmprobs 60:14:24
+ insrcquenchs 60:14:25
+ inredirects 60:14:26
+ inechos 60:14:27
+ inechoreps 60:14:28
+ intimestamps 60:14:29
+ intimestampreps 60:14:30
+ inaddrmasks 60:14:31
+ inaddrmaskreps 60:14:32
+ outmsgs 60:14:33
+ outerrors 60:14:34
+ outdestunreachs 60:14:35
+ outtimeexcds 60:14:36
+ outparmprobs 60:14:37
+ outsrcquenchs 60:14:38
+ outredirects 60:14:39
+ outechos 60:14:40
+ outechoreps 60:14:41
+ outtimestamps 60:14:42
+ outtimestampreps 60:14:43
+ outaddrmasks 60:14:44
+ outaddrmaskreps 60:14:45
+ incsumerrors 60:14:46
+}
+
+network.tcp {
+ rtoalgorithm 60:14:50
+ rtomin 60:14:51
+ rtomax 60:14:52
+ maxconn 60:14:53
+ activeopens 60:14:54
+ passiveopens 60:14:55
+ attemptfails 60:14:56
+ estabresets 60:14:57
+ currestab 60:14:58
+ insegs 60:14:59
+ outsegs 60:14:60
+ retranssegs 60:14:61
+ inerrs 60:14:62
+ outrsts 60:14:63
+ incsumerrors 60:14:64
+ syncookiessent 60:53:17
+ syncookiesrecv 60:53:18
+ syncookiesfailed 60:53:19
+ embryonicrsts 60:53:20
+ prunecalled 60:53:21
+ rcvpruned 60:53:22
+ ofopruned 60:53:23
+ outofwindowicmps 60:53:24
+ lockdroppedicmps 60:53:25
+ arpfilter 60:53:26
+ timewaited 60:53:27
+ timewaitrecycled 60:53:28
+ timewaitkilled 60:53:29
+ pawspassiverejected 60:53:30
+ pawsactiverejected 60:53:31
+ pawsestabrejected 60:53:32
+ delayedacks 60:53:33
+ delayedacklocked 60:53:34
+ delayedacklost 60:53:35
+ listenoverflows 60:53:36
+ listendrops 60:53:37
+ prequeued 60:53:38
+ directcopyfrombacklog 60:53:39
+ directcopyfromprequeue 60:53:40
+ prequeueddropped 60:53:41
+ hphits 60:53:42
+ hphitstouser 60:53:43
+ pureacks 60:53:44
+ hpacks 60:53:45
+ renorecovery 60:53:46
+ sackrecovery 60:53:47
+ sackreneging 60:53:48
+ fackreorder 60:53:49
+ sackreorder 60:53:50
+ renoreorder 60:53:51
+ tsreorder 60:53:52
+ fullundo 60:53:53
+ partialundo 60:53:54
+ dsackundo 60:53:55
+ lossundo 60:53:56
+ lostretransmit 60:53:57
+ renofailures 60:53:58
+ sackfailures 60:53:59
+ lossfailures 60:53:60
+ fastretrans 60:53:61
+ forwardretrans 60:53:62
+ slowstartretrans 60:53:63
+ timeouts 60:53:64
+ lossprobes 60:53:65
+ lossproberecovery 60:53:66
+ renorecoveryfail 60:53:67
+ sackrecoveryfail 60:53:68
+ schedulerfail 60:53:69
+ rcvcollapsed 60:53:70
+ dsackoldsent 60:53:71
+ dsackofosent 60:53:72
+ dsackrecv 60:53:73
+ dsackoforecv 60:53:74
+ abortondata 60:53:75
+ abortonclose 60:53:76
+ abortonmemory 60:53:77
+ abortontimeout 60:53:78
+ abortonlinger 60:53:79
+ abortfailed 60:53:80
+ memorypressures 60:53:81
+ sackdiscard 60:53:82
+ dsackignoredold 60:53:83
+ dsackignorednoundo 60:53:84
+ spuriousrtos 60:53:85
+ md5notfound 60:53:86
+ md5unexpected 60:53:87
+ sackshifted 60:53:88
+ sackmerged 60:53:89
+ sackshiftfallback 60:53:90
+ backlogdrop 60:53:91
+ minttldrop 60:53:92
+ deferacceptdrop 60:53:93
+ iprpfilter 60:53:94
+ timewaitoverflow 60:53:95
+ reqqfulldocookies 60:53:96
+ reqqfulldrop 60:53:97
+ retransfail 60:53:98
+ rcvcoalesce 60:53:99
+ ofoqueue 60:53:100
+ ofodrop 60:53:101
+ ofomerge 60:53:102
+ challengeack 60:53:103
+ synchallenge 60:53:104
+ fastopenactive 60:53:105
+ fastopenactivefail 60:53:106
+ fastopenpassive 60:53:107
+ fastopenpassivefail 60:53:108
+ fastopenlistenoverflow 60:53:109
+ fastopencookiereqd 60:53:110
+ spuriousrtxhostqueues 60:53:111
+ busypollrxpackets 60:53:112
+ autocorking 60:53:113
+ fromzerowindowadv 60:53:114
+ tozerowindowadv 60:53:115
+ wantzerowindowadv 60:53:116
+ synretrans 60:53:117
+ origdatasent 60:53:118
+}
+
+network.udp {
+ indatagrams 60:14:70
+ noports 60:14:71
+ inerrors 60:14:72
+ outdatagrams 60:14:74
+ recvbuferrors 60:14:75
+ sndbuferrors 60:14:76
+ incsumerrors 60:14:83
+}
+
+network.udplite {
+ indatagrams 60:14:77
+ noports 60:14:78
+ inerrors 60:14:79
+ outdatagrams 60:14:80
+ recvbuferrors 60:14:81
+ sndbuferrors 60:14:82
+ incsumerrors 60:14:84
+}
+
+network.icmpmsg {
+ intype 60:14:88
+ outtype 60:14:89
+}
+
+filesys {
+ capacity 60:5:1
+ used 60:5:2
+ free 60:5:3
+ maxfiles 60:5:4
+ usedfiles 60:5:5
+ freefiles 60:5:6
+ mountdir 60:5:7
+ full 60:5:8
+ blocksize 60:5:9
+ avail 60:5:10
+ readonly 60:5:11
+}
+
+tmpfs {
+ capacity 60:34:1
+ used 60:34:2
+ free 60:34:3
+ maxfiles 60:34:4
+ usedfiles 60:34:5
+ freefiles 60:34:6
+ full 60:34:7
+}
+
+swapdev {
+ free 60:6:0
+ length 60:6:1
+ maxswap 60:6:2
+ vlength 60:6:3
+ priority 60:6:4
+}
+
+nfs {
+ client
+ server
+}
+
+nfs.client {
+ calls 60:7:1
+ reqs 60:7:4
+}
+
+nfs.server {
+ calls 60:7:50
+ reqs 60:7:12
+}
+
+rpc {
+ client
+ server
+}
+
+rpc.client {
+ rpccnt 60:7:20
+ rpcretrans 60:7:21
+ rpcauthrefresh 60:7:22
+ netcnt 60:7:24
+ netudpcnt 60:7:25
+ nettcpcnt 60:7:26
+ nettcpconn 60:7:27
+}
+
+rpc.server {
+ rpccnt 60:7:30
+ rpcerr 60:7:31
+ rpcbadfmt 60:7:32
+ rpcbadauth 60:7:33
+ rpcbadclnt 60:7:34
+ rchits 60:7:35
+ rcmisses 60:7:36
+ rcnocache 60:7:37
+ fh_cached 60:7:38
+ fh_valid 60:7:39
+ fh_fixup 60:7:40
+ fh_lookup 60:7:41
+ fh_stale 60:7:42
+ fh_concurrent 60:7:43
+ netcnt 60:7:44
+ netudpcnt 60:7:45
+ nettcpcnt 60:7:46
+ nettcpconn 60:7:47
+ fh_anon 60:7:51
+ fh_nocache_dir 60:7:52
+ fh_nocache_nondir 60:7:53
+ io_read 60:7:54
+ io_write 60:7:55
+ th_cnt 60:7:56
+ th_fullcnt 60:7:57
+}
+
+nfs3 {
+ client
+ server
+}
+
+nfs3.client {
+ calls 60:7:60
+ reqs 60:7:61
+}
+
+nfs3.server {
+ calls 60:7:62
+ reqs 60:7:63
+}
+
+nfs4 {
+ client
+ server
+}
+
+nfs4.client {
+ calls 60:7:64
+ reqs 60:7:65
+}
+
+nfs4.server {
+ calls 60:7:66
+ reqs 60:7:67
+}
+
+network.tcpconn {
+ established 60:19:1
+ syn_sent 60:19:2
+ syn_recv 60:19:3
+ fin_wait1 60:19:4
+ fin_wait2 60:19:5
+ time_wait 60:19:6
+ close 60:19:7
+ close_wait 60:19:8
+ last_ack 60:19:9
+ listen 60:19:10
+ closing 60:19:11
+}
+
+mem.slabinfo {
+ objects
+ slabs
+}
+
+mem.slabinfo.objects {
+ active 60:20:0
+ total 60:20:1
+ size 60:20:2
+}
+
+mem.slabinfo.slabs {
+ active 60:20:3
+ total 60:20:4
+ pages_per_slab 60:20:5
+ objects_per_slab 60:20:6
+ total_size 60:20:7
+}
+
+mem.vmstat {
+	/* sorted by name to make maintenance easier (a few later additions
+	 * appear out of strict alphabetical order) */
+ allocstall 60:28:35
+ compact_blocks_moved 60:28:57
+ compact_fail 60:28:58
+ compact_pagemigrate_failed 60:28:59
+ compact_pages_moved 60:28:60
+ compact_stall 60:28:61
+ compact_success 60:28:62
+ htlb_buddy_alloc_fail 60:28:43
+ htlb_buddy_alloc_success 60:28:44
+ kswapd_inodesteal 60:28:33
+ kswapd_low_wmark_hit_quickly 60:28:87
+ kswapd_high_wmark_hit_quickly 60:28:88
+ kswapd_skip_congestion_wait 60:28:89
+ kswapd_steal 60:28:32
+ nr_active_anon 60:28:45
+ nr_active_file 60:28:46
+ nr_anon_pages 60:28:39
+ nr_anon_transparent_hugepages 60:28:90
+ nr_bounce 60:28:40
+ nr_dirtied 60:28:91
+ nr_dirty 60:28:0
+ nr_dirty_background_threshold 60:28:92
+ nr_dirty_threshold 60:28:93
+ nr_free_pages 60:28:47
+ nr_inactive_anon 60:28:48
+ nr_inactive_file 60:28:49
+ nr_isolated_anon 60:28:50
+ nr_isolated_file 60:28:51
+ nr_kernel_stack 60:28:52
+ nr_mapped 60:28:4
+ nr_mlock 60:28:53
+ nr_page_table_pages 60:28:3
+ nr_shmem 60:28:54
+ nr_slab 60:28:5
+ nr_slab_reclaimable 60:28:37
+ nr_slab_unreclaimable 60:28:38
+ nr_unevictable 60:28:55
+ nr_unstable 60:28:2
+ nr_vmscan_write 60:28:42
+ nr_writeback 60:28:1
+ nr_writeback_temp 60:28:56
+ nr_written 60:28:94
+ numa_foreign 60:28:95
+ numa_hit 60:28:96
+ numa_interleave 60:28:97
+ numa_local 60:28:98
+ numa_miss 60:28:99
+ numa_other 60:28:100
+ pageoutrun 60:28:34
+ pgactivate 60:28:14
+ pgalloc_dma 60:28:12
+ pgalloc_dma32 60:28:63
+ pgalloc_high 60:28:10
+ pgalloc_movable 60:28:64
+ pgalloc_normal 60:28:11
+ pgrefill_dma32 60:28:65
+ pgrefill_movable 60:28:66
+ pgdeactivate 60:28:15
+ pgfault 60:28:16
+ pgfree 60:28:13
+ pginodesteal 60:28:30
+ pgmajfault 60:28:17
+ pgpgin 60:28:6
+ pgpgout 60:28:7
+ pgrefill_dma 60:28:20
+ pgrefill_high 60:28:18
+ pgrefill_normal 60:28:19
+ pgrotated 60:28:36
+ pgscan_direct_dma 60:28:29
+ pgscan_direct_dma32 60:28:67
+ pgscan_direct_high 60:28:27
+ pgscan_direct_movable 60:28:68
+ pgscan_direct_normal 60:28:28
+ pgscan_kswapd_dma 60:28:26
+ pgscan_kswapd_dma32 60:28:69
+ pgscan_kswapd_high 60:28:24
+ pgscan_kswapd_movable 60:28:70
+ pgscan_kswapd_normal 60:28:25
+ pgsteal_dma 60:28:23
+ pgsteal_dma32 60:28:71
+ pgsteal_high 60:28:21
+ pgsteal_movable 60:28:72
+ pgsteal_normal 60:28:22
+ pswpin 60:28:8
+ pswpout 60:28:9
+ slabs_scanned 60:28:31
+ thp_fault_alloc 60:28:73
+ thp_fault_fallback 60:28:74
+ thp_collapse_alloc 60:28:75
+ thp_collapse_alloc_failed 60:28:76
+ thp_split 60:28:77
+ unevictable_pgs_cleared 60:28:78
+ unevictable_pgs_culled 60:28:79
+ unevictable_pgs_mlocked 60:28:80
+ unevictable_pgs_mlockfreed 60:28:81
+ unevictable_pgs_munlocked 60:28:82
+ unevictable_pgs_rescued 60:28:83
+ unevictable_pgs_scanned 60:28:84
+ unevictable_pgs_stranded 60:28:85
+ zone_reclaim_failed 60:28:86
+}
+
+vfs {
+ files
+ inodes
+ dentry
+}
+
+vfs.files {
+ count 60:27:0
+ free 60:27:1
+ max 60:27:2
+}
+
+vfs.inodes {
+ count 60:27:3
+ free 60:27:4
+}
+
+vfs.dentry {
+ count 60:27:5
+ free 60:27:6
+}
+
+sysfs {
+ kernel
+}
+
+sysfs.kernel {
+ uevent_seqnum 60:35:0
+}
+
diff --git a/src/pmdas/linux/sem_limits.c b/src/pmdas/linux/sem_limits.c
new file mode 100644
index 0000000..24e336e
--- /dev/null
+++ b/src/pmdas/linux/sem_limits.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ * This code contributed by Mike Mason <mmlnx@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define __USE_GNU 1 /* required for IPC_INFO define */
+#include <sys/ipc.h>
+#include <sys/sem.h>
+
+#include "pmapi.h"
+#include "sem_limits.h"
+
+/*
+ * Fetch the system-wide SysV semaphore limits via semctl(IPC_INFO) and
+ * copy them into *sem_limits.  Returns 0 on success, negated errno on
+ * failure.
+ */
+int
+refresh_sem_limits(sem_limits_t *sem_limits)
+{
+    static int started;
+    static struct seminfo seminfo;
+    static union semun arg;
+
+    /* one-time setup: point the semun union at our static seminfo buffer */
+    if (!started) {
+	started = 1;
+	memset(sem_limits, 0, sizeof(sem_limits_t));
+	arg.array = (unsigned short *) &seminfo;
+    }
+
+    /* IPC_INFO fills the seminfo struct addressed through arg */
+    if (semctl(0, 0, IPC_INFO, arg) < 0) {
+	return -oserror();
+    }
+
+    sem_limits->semmap = seminfo.semmap;
+    sem_limits->semmni = seminfo.semmni;
+    sem_limits->semmns = seminfo.semmns;
+    sem_limits->semmnu = seminfo.semmnu;
+    sem_limits->semmsl = seminfo.semmsl;
+    sem_limits->semopm = seminfo.semopm;
+    sem_limits->semume = seminfo.semume;
+    sem_limits->semusz = seminfo.semusz;
+    sem_limits->semvmx = seminfo.semvmx;
+    sem_limits->semaem = seminfo.semaem;
+    return 0;
+}
diff --git a/src/pmdas/linux/sem_limits.h b/src/pmdas/linux/sem_limits.h
new file mode 100644
index 0000000..ef0694e
--- /dev/null
+++ b/src/pmdas/linux/sem_limits.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * This code contributed by Mike Mason (mmlnx@us.ibm.com)
+ */
+
+#ifdef _SEM_SEMUN_UNDEFINED
+/* glibc 2.1 no longer defines semun, instead it defines
+ * _SEM_SEMUN_UNDEFINED so users can define semun on their own.
+ */
+union semun {
+    int val;			/* value for SETVAL */
+    struct semid_ds *buf;	/* buffer for IPC_STAT, IPC_SET */
+    unsigned short int *array;	/* array for GETALL, SETALL */
+    struct seminfo *__buf;	/* buffer for IPC_INFO */
+};
+#endif
+
+/* System-wide SysV semaphore limits as reported by semctl(IPC_INFO) */
+typedef struct {
+	unsigned int semmap; /* # of entries in semaphore map */
+	unsigned int semmni; /* max # of semaphore identifiers */
+	unsigned int semmns; /* max # of semaphores in system */
+	unsigned int semmnu; /* num of undo structures system wide */
+	unsigned int semmsl; /* max num of semaphores per id */
+	unsigned int semopm; /* max num of ops per semop call */
+	unsigned int semume; /* max num of undo entries per process */
+	unsigned int semusz; /* sizeof struct sem_undo */
+	unsigned int semvmx; /* semaphore maximum value */
+	unsigned int semaem; /* adjust on exit max value */
+} sem_limits_t;
+
+/* re-read the limits; returns 0 or negated errno (see sem_limits.c) */
+extern int refresh_sem_limits(sem_limits_t*);
+
diff --git a/src/pmdas/linux/shm_limits.c b/src/pmdas/linux/shm_limits.c
new file mode 100644
index 0000000..e1c80f0
--- /dev/null
+++ b/src/pmdas/linux/shm_limits.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ * This code contributed by Mike Mason <mmlnx@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#define __USE_GNU 1 /* required for IPC_INFO define */
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+#include "pmapi.h"
+#include "shm_limits.h"
+
+/*
+ * Fetch the system-wide SysV shared memory limits via shmctl(IPC_INFO)
+ * and copy them into *shm_limits.  Returns 0 on success, negated errno
+ * on failure.
+ */
+int
+refresh_shm_limits(shm_limits_t *shm_limits)
+{
+    static int started;
+    static struct shminfo shminfo;
+
+    /* zero the caller's struct once, so unset fields read as 0 */
+    if (!started) {
+	started = 1;
+	memset(shm_limits, 0, sizeof(shm_limits_t));
+    }
+
+    /* IPC_INFO fills shminfo; the shmid_ds cast is the documented idiom */
+    if (shmctl(0, IPC_INFO, (struct shmid_ds *) &shminfo) < 0)
+	return -oserror();
+
+    shm_limits->shmmax = shminfo.shmmax;
+    shm_limits->shmmin = shminfo.shmmin;
+    shm_limits->shmmni = shminfo.shmmni;
+    shm_limits->shmseg = shminfo.shmseg;
+    shm_limits->shmall = shminfo.shmall;
+    return 0;
+}
diff --git a/src/pmdas/linux/shm_limits.h b/src/pmdas/linux/shm_limits.h
new file mode 100644
index 0000000..39d7f71
--- /dev/null
+++ b/src/pmdas/linux/shm_limits.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * This code contributed by Mike Mason (mmlnx@us.ibm.com)
+ */
+
+typedef struct {
+ unsigned int shmmax; /* maximum shared segment size (bytes) */
+ unsigned int shmmin; /* minimum shared segment size (bytes) */
+ unsigned int shmmni; /* maximum number of segments system wide */
+ unsigned int shmseg; /* maximum shared segments per process */
+ unsigned int shmall; /* maximum shared memory system wide (pages) */
+} shm_limits_t;
+
+extern int refresh_shm_limits(shm_limits_t *);
+
diff --git a/src/pmdas/linux/swapdev.c b/src/pmdas/linux/swapdev.c
new file mode 100644
index 0000000..0b5521e
--- /dev/null
+++ b/src/pmdas/linux/swapdev.c
@@ -0,0 +1,72 @@
+/*
+ * Linux Swap Device Cluster
+ *
+ * Copyright (c) 2014 Red Hat.
+ * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "swapdev.h"
+
+int
+refresh_swapdev(pmInDom swapdev_indom)
+{
+ char buf[MAXPATHLEN];
+ swapdev_t *swap;
+ FILE *fp;
+ char *path;
+ char *size;
+ char *used;
+ char *priority;
+ int sts;
+
+ pmdaCacheOp(swapdev_indom, PMDA_CACHE_INACTIVE);
+
+ if ((fp = linux_statsfile("/proc/swaps", buf, sizeof(buf))) == NULL)
+ return -oserror();
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if (buf[0] != '/')
+ continue;
+ if ((path = strtok(buf, " \t")) == 0)
+ continue;
+ if ((/*type: */ strtok(NULL, " \t")) == NULL ||
+ (size = strtok(NULL, " \t")) == NULL ||
+ (used = strtok(NULL, " \t")) == NULL ||
+ (priority = strtok(NULL, " \t")) == NULL)
+ continue;
+ sts = pmdaCacheLookupName(swapdev_indom, path, NULL, (void **)&swap);
+ if (sts == PMDA_CACHE_ACTIVE) /* repeated line in /proc/swaps? */
+ continue;
+ if (sts == PMDA_CACHE_INACTIVE) { /* re-activate an old swap device */
+ pmdaCacheStore(swapdev_indom, PMDA_CACHE_ADD, path, swap);
+ }
+ else { /* new swap device */
+ if ((swap = malloc(sizeof(swapdev_t))) == NULL)
+ continue;
+#ifdef PCP_DEBUG
+ if (pmDebug & DBG_TRACE_LIBPMDA)
+ fprintf(stderr, "refresh_swapdev: add \"%s\"\n", path);
+#endif
+ pmdaCacheStore(swapdev_indom, PMDA_CACHE_ADD, path, swap);
+ }
+ sscanf(size, "%u", &swap->size);
+ sscanf(used, "%u", &swap->used);
+ sscanf(priority, "%d", &swap->priority);
+ }
+ fclose(fp);
+ return 0;
+}
diff --git a/src/pmdas/linux/swapdev.h b/src/pmdas/linux/swapdev.h
new file mode 100644
index 0000000..fa619a8
--- /dev/null
+++ b/src/pmdas/linux/swapdev.h
@@ -0,0 +1,28 @@
+/*
+ * Linux Swap Device Cluster
+ *
+ * Copyright (c) 1995 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+typedef struct swapdev {
+ char *path;
+ unsigned int size;
+ unsigned int used;
+ int priority;
+} swapdev_t;
+
+extern int refresh_swapdev(pmInDom);
diff --git a/src/pmdas/linux/sysfs_kernel.c b/src/pmdas/linux/sysfs_kernel.c
new file mode 100644
index 0000000..8880634
--- /dev/null
+++ b/src/pmdas/linux/sysfs_kernel.c
@@ -0,0 +1,41 @@
+/*
+ * Linux sysfs_kernel cluster
+ *
+ * Copyright (c) 2009,2014 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "sysfs_kernel.h"
+#include "indom.h"
+
+int
+refresh_sysfs_kernel(sysfs_kernel_t *sk)
+{
+ char buf[MAXPATHLEN];
+ int fd, n;
+
+ snprintf(buf, sizeof(buf), "%s/sys/kernel/uevent_seqnum", linux_statspath);
+ if ((fd = open(buf, O_RDONLY)) < 0) {
+ sk->valid_uevent_seqnum = 0;
+ return -oserror();
+ }
+
+ if ((n = read(fd, buf, sizeof(buf))) <= 0)
+ sk->valid_uevent_seqnum = 0;
+ else {
+ buf[n-1] = '\0';
+ sscanf(buf, "%llu", (long long unsigned int *)&sk->uevent_seqnum);
+ sk->valid_uevent_seqnum = 1;
+ }
+ close(fd);
+ return 0;
+}
diff --git a/src/pmdas/linux/sysfs_kernel.h b/src/pmdas/linux/sysfs_kernel.h
new file mode 100644
index 0000000..b43445d
--- /dev/null
+++ b/src/pmdas/linux/sysfs_kernel.h
@@ -0,0 +1,34 @@
+/*
+ * Linux sysfs_kernel cluster
+ *
+ * Copyright (c) 2009, Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _SYSFS_KERNEL_H
+#define _SYSFS_KERNEL_H
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include <ctype.h>
+
+typedef struct {
+ int valid_uevent_seqnum;
+ uint64_t uevent_seqnum; /* /sys/kernel/uevent_seqnum */
+ /* TODO queue length, event type counters and other metrics */
+} sysfs_kernel_t;
+
+/* refresh sysfs_kernel */
+extern int refresh_sysfs_kernel(sysfs_kernel_t *);
+
+#endif /* _SYSFS_KERNEL_H */