author     Igor Pashev <pashev.igor@gmail.com>    2014-10-26 12:33:50 +0400
committer  Igor Pashev <pashev.igor@gmail.com>    2014-10-26 12:33:50 +0400
commit     47e6e7c84f008a53061e661f31ae96629bc694ef (patch)
tree       648a07f3b5b9d67ce19b0fd72e8caa1175c98f1a /src/pmdas/linux_xfs
Debian 3.9.10 (tag: debian/3.9.10)
Diffstat (limited to 'src/pmdas/linux_xfs')
-rw-r--r--   src/pmdas/linux_xfs/GNUmakefile               76
-rwxr-xr-x   src/pmdas/linux_xfs/Install                   29
-rwxr-xr-x   src/pmdas/linux_xfs/Remove                    23
-rw-r--r--   src/pmdas/linux_xfs/clusters.h                31
-rw-r--r--   src/pmdas/linux_xfs/filesys.c                183
-rw-r--r--   src/pmdas/linux_xfs/filesys.h                108
-rw-r--r--   src/pmdas/linux_xfs/help                     469
-rw-r--r--   src/pmdas/linux_xfs/indom.h                   31
-rw-r--r--   src/pmdas/linux_xfs/linux_xfs_migrate.conf    16
-rw-r--r--   src/pmdas/linux_xfs/pmda.c                   979
-rw-r--r--   src/pmdas/linux_xfs/proc_fs_xfs.c            278
-rw-r--r--   src/pmdas/linux_xfs/proc_fs_xfs.h            189
-rw-r--r--   src/pmdas/linux_xfs/root                       6
-rw-r--r--   src/pmdas/linux_xfs/root_xfs                 295
14 files changed, 2713 insertions, 0 deletions
diff --git a/src/pmdas/linux_xfs/GNUmakefile b/src/pmdas/linux_xfs/GNUmakefile
new file mode 100644
index 0000000..211f651
--- /dev/null
+++ b/src/pmdas/linux_xfs/GNUmakefile
@@ -0,0 +1,76 @@
+#
+# Copyright (c) 2013 Red Hat.
+# Copyright (c) 2000,2003,2004,2008 Silicon Graphics, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+
+TOPDIR = ../../..
+include $(TOPDIR)/src/include/builddefs
+
+IAM        = xfs
+DOMAIN     = XFS
+CMDTARGET  = pmda$(IAM)
+LIBTARGET  = pmda_$(IAM).$(DSOSUFFIX)
+PMDAINIT   = $(IAM)_init
+PMDADIR    = $(PCP_PMDAS_DIR)/$(IAM)
+CONF_LINE  = "xfs	11	pipe	binary		$(PMDADIR)/$(CMDTARGET) -d 11"
+
+CFILES = proc_fs_xfs.c filesys.c pmda.c
+HFILES = proc_fs_xfs.h filesys.h clusters.h indom.h
+
+SCRIPTS        = Install Remove
+VERSION_SCRIPT = exports
+HELPTARGETS    = help.dir help.pag
+LSRCFILES      = help root root_xfs linux_xfs_migrate.conf $(SCRIPTS)
+LDIRT          = $(HELPTARGETS) domain.h $(VERSION_SCRIPT)
+
+LLDLIBS = $(PCP_PMDALIB)
+LCFLAGS = $(INVISIBILITY)
+
+default: build-me
+
+include $(BUILDRULES)
+
+ifeq "$(TARGET_OS)" "linux"
+build-me: domain.h $(LIBTARGET) $(CMDTARGET) $(HELPTARGETS)
+	@if [ `grep -c $(CONF_LINE) ../pmcd.conf` -eq 0 ]; then \
+	    echo $(CONF_LINE) >> ../pmcd.conf ; \
+	fi
+
+install: default
+	$(INSTALL) -m 755 -d $(PMDADIR)
+	$(INSTALL) -m 644 domain.h help help.dir help.pag root root_xfs $(PMDADIR)
+	$(INSTALL) -m 755 $(LIBTARGET) $(CMDTARGET) $(SCRIPTS) $(PMDADIR)
+	$(INSTALL) -m 644 root_xfs $(PCP_VAR_DIR)/pmns/root_xfs
+	$(INSTALL) -m 644 linux_xfs_migrate.conf $(PCP_VAR_DIR)/config/pmlogrewrite/linux_xfs_migrate.conf
+else
+build-me:
+install:
+endif
+
+default_pcp : default
+
+install_pcp : install
+
+$(HELPTARGETS) : help
+	$(RUN_IN_BUILD_ENV) $(TOPDIR)/src/newhelp/newhelp -n root_xfs -v 2 -o help < help
+
+$(VERSION_SCRIPT):
+	$(VERSION_SCRIPT_MAKERULE)
+
+domain.h: ../../pmns/stdpmid
+	$(DOMAIN_MAKERULE)
+
+pmda.o: domain.h
+pmda.o proc_fs_xfs.o: proc_fs_xfs.h
+filesys.o pmda.o: filesys.h
+pmda.o: $(VERSION_SCRIPT)
diff --git a/src/pmdas/linux_xfs/Install b/src/pmdas/linux_xfs/Install
new file mode 100755
index 0000000..4f68af7
--- /dev/null
+++ b/src/pmdas/linux_xfs/Install
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Copyright (c) 2013 Red Hat Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Install the Linux XFS PMDA and/or PMNS
+#
+
+. $PCP_DIR/etc/pcp.env
+. $PCP_SHARE_DIR/lib/pmdaproc.sh
+
+iam=xfs
+pmda_interface=3
+daemon_opt=true
+pipe_opt=true
+pmns_source=root_xfs
+
+pmdaSetup
+pmdaInstall
+exit 0
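As with other PCP PMDAs, this Install script is normally run as root from the installed PMDA directory, typically "cd $PCP_PMDAS_DIR/xfs; ./Install"; the pmdaSetup and pmdaInstall helpers sourced from pmdaproc.sh then take care of the pmcd.conf entry and the PMNS update.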
diff --git a/src/pmdas/linux_xfs/Remove b/src/pmdas/linux_xfs/Remove
new file mode 100755
index 0000000..1210f45
--- /dev/null
+++ b/src/pmdas/linux_xfs/Remove
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# Copyright (c) 2013 Red Hat Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Remove the Linux XFS PMDA
+#
+
+. $PCP_DIR/etc/pcp.env
+. $PCP_SHARE_DIR/lib/pmdaproc.sh
+iam=xfs
+pmdaSetup
+pmdaRemove
+exit 0
diff --git a/src/pmdas/linux_xfs/clusters.h b/src/pmdas/linux_xfs/clusters.h
new file mode 100644
index 0000000..a984343
--- /dev/null
+++ b/src/pmdas/linux_xfs/clusters.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2005,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _CLUSTERS_H
+#define _CLUSTERS_H
+
+/*
+ * PMID cluster values ... to manage the PMID migration after the
+ * linux -> linux + xfs PMDAs split, these need to match the enum
+ * assigned values for CLUSTER_* from the original Linux PMDA.
+ */
+#define CLUSTER_XFS     16  /* /proc/fs/xfs/stat */
+#define CLUSTER_XFSBUF  17  /* /proc/fs/pagebuf/stat */
+#define CLUSTER_QUOTA   30  /* quotactl() */
+
+#define MIN_CLUSTER     16  /* first cluster number we use here */
+#define NUM_CLUSTERS    31  /* one more than highest cluster number used */
+
+#endif /* _CLUSTERS_H */
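To illustrate how these cluster values are used: PMDA_PMID(cluster, item) from <pcp/pmda.h> packs only the cluster and item fields of a metric identifier, and the PMDA's domain number (11, per CONF_LINE in the GNUmakefile above) is filled in by libpcp_pmda at initialization. A minimal standalone sketch, not part of this commit:

    #include <stdio.h>
    #include <pcp/pmapi.h>
    #include <pcp/impl.h>
    #include <pcp/pmda.h>

    /* Compose the PMID for xfs.allocs.alloc_extent: cluster
     * CLUSTER_XFS (16), item 0.  The domain field is still zero
     * here; pmdaInit() patches it into the metric table later.
     */
    int
    main(void)
    {
        pmID pmid = PMDA_PMID(16, 0);   /* CLUSTER_XFS, item 0 */
        printf("cluster 16, item 0 -> %s\n", pmIDStr(pmid));
        return 0;
    }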
diff --git a/src/pmdas/linux_xfs/filesys.c b/src/pmdas/linux_xfs/filesys.c
new file mode 100644
index 0000000..34f8b66
--- /dev/null
+++ b/src/pmdas/linux_xfs/filesys.c
@@ -0,0 +1,183 @@
+/*
+ * XFS Filesystems Cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2004,2007 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "filesys.h"
+#include "proc_fs_xfs.h"
+
+static void
+refresh_filesys_projects(pmInDom qindom, filesys_t *fs)
+{
+    char buffer[MAXPATHLEN];
+    project_t *qp;
+    fs_quota_stat_t s;
+    fs_disk_quota_t d;
+    size_t idsz, devsz;
+    FILE *projects;
+    char *p, *idend;
+    uint32_t prid;
+    int qcmd, sts;
+
+    qcmd = QCMD(Q_XGETQSTAT, XQM_PRJQUOTA);
+    if (quotactl(qcmd, fs->device, 0, (void*)&s) < 0)
+	return;
+
+    if (s.qs_flags & XFS_QUOTA_PDQ_ENFD)
+	fs->flags |= FSF_QUOT_PROJ_ENF;
+    if (s.qs_flags & XFS_QUOTA_PDQ_ACCT)
+	fs->flags |= FSF_QUOT_PROJ_ACC;
+    else
+	return;
+
+    if ((projects = xfs_statsfile("/etc/projects", "r")) == NULL)
+	return;
+
+    qcmd = QCMD(Q_XQUOTASYNC, XQM_PRJQUOTA);
+    quotactl(qcmd, fs->device, 0, NULL);
+
+    while (fgets(buffer, sizeof(buffer), projects)) {
+	if (buffer[0] == '#')
+	    continue;
+
+	prid = strtol(buffer, &idend, 10);
+	idsz = idend - buffer;
+	qcmd = QCMD(Q_XGETQUOTA, XQM_PRJQUOTA);
+	if (!idsz || quotactl(qcmd, fs->device, prid, (void *)&d) < 0)
+	    continue;
+
+	devsz = strlen(fs->device);
+	p = malloc(idsz+devsz+2);
+	if (!p)
+	    continue;
+	memcpy(p, buffer, idsz);
+	p[idsz] = ':';
+	memcpy(&p[idsz+1], fs->device, devsz+1);
+
+	qp = NULL;
+	sts = pmdaCacheLookupName(qindom, p, NULL, (void **)&qp);
+	if (sts == PMDA_CACHE_ACTIVE)	/* repeated line in /etc/projects? */
+	    goto next;
+	if (sts != PMDA_CACHE_INACTIVE) {
+	    qp = (project_t *)malloc(sizeof(project_t));
+	    if (!qp)
+		goto next;
+	    if (pmDebug & DBG_TRACE_LIBPMDA)
+		fprintf(stderr, "refresh_filesys_projects: add \"%s\"\n", p);
+	}
+	qp->space_hard = d.d_blk_hardlimit;
+	qp->space_soft = d.d_blk_softlimit;
+	qp->space_used = d.d_bcount;
+	qp->space_time_left = d.d_btimer;
+	qp->files_hard = d.d_ino_hardlimit;
+	qp->files_soft = d.d_ino_softlimit;
+	qp->files_used = d.d_icount;
+	qp->files_time_left = d.d_itimer;
+	pmdaCacheStore(qindom, PMDA_CACHE_ADD, p, (void *)qp);
+next:
+	free(p);
+    }
+    fclose(projects);
+}
+
+char *
+scan_filesys_options(const char *options, const char *option)
+{
+    static char buffer[128];
+    char *s;
+
+    strncpy(buffer, options, sizeof(buffer));
+    buffer[sizeof(buffer)-1] = '\0';
+
+    s = strtok(buffer, ",");
+    while (s) {
+	if (strcmp(s, option) == 0)
+	    return s;
+	s = strtok(NULL, ",");
+    }
+    return NULL;
+}
+
+int
+refresh_filesys(pmInDom filesys_indom, pmInDom quota_indom)
+{
+    char buf[MAXPATHLEN];
+    char realdevice[MAXPATHLEN];
+    filesys_t *fs;
+    pmInDom indom = filesys_indom;
+    FILE *fp;
+    char *path, *device, *type, *options;
+    int sts;
+
+    pmdaCacheOp(quota_indom, PMDA_CACHE_INACTIVE);
+    pmdaCacheOp(filesys_indom, PMDA_CACHE_INACTIVE);
+
+    if ((fp = xfs_statsfile("/proc/mounts", "r")) == NULL)
+	return -oserror();
+
+    while (fgets(buf, sizeof(buf), fp) != NULL) {
+	if ((device = strtok(buf, " ")) == 0)
+	    continue;
+
+	path = strtok(NULL, " ");
+	type = strtok(NULL, " ");
+	options = strtok(NULL, " ");
+	if (strcmp(type, "xfs") != 0)
+	    continue;
+	if (strncmp(device, "/dev", 4) != 0)
+	    continue;
+	if (realpath(device, realdevice) != NULL)
+	    device = realdevice;
+
+	sts = pmdaCacheLookupName(indom, device, NULL, (void **)&fs);
+	if (sts == PMDA_CACHE_ACTIVE)	/* repeated line in /proc/mounts? */
+	    continue;
+	if (sts == PMDA_CACHE_INACTIVE) { /* re-activate an old mount */
+	    pmdaCacheStore(indom, PMDA_CACHE_ADD, device, fs);
+	    if (strcmp(path, fs->path) != 0) {	/* old device, new path */
+		free(fs->path);
+		fs->path = strdup(path);
+	    }
+	    if (strcmp(options, fs->options) != 0) { /* old device, new opts */
+		free(fs->options);
+		fs->options = strdup(options);
+	    }
+	}
+	else {	/* new mount */
+	    if ((fs = malloc(sizeof(filesys_t))) == NULL)
+		continue;
+	    fs->device = strdup(device);
+	    fs->path = strdup(path);
+	    fs->options = strdup(options);
+	    if (pmDebug & DBG_TRACE_LIBPMDA)
+		fprintf(stderr, "refresh_filesys: add \"%s\" \"%s\"\n",
+		    fs->path, device);
+	    pmdaCacheStore(indom, PMDA_CACHE_ADD, device, fs);
+	}
+	fs->flags = 0;
+	refresh_filesys_projects(quota_indom, fs);
+    }
+
+    /*
+     * success
+     * Note: we do not call statfs() here since only some instances
+     * may be requested (rather, we do it in xfs_fetch, see pmda.c).
+     */
+    fclose(fp);
+    return 0;
+}
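refresh_filesys_projects() above assumes projects file entries of the form <numeric-project-id>:<directory>, one per line, with # comment lines skipped. A minimal standalone sketch of just that parse, using a hypothetical sample in place of the real /etc/projects and no quotactl():

    #include <stdio.h>
    #include <stdlib.h>

    /* Each non-comment line starts with a numeric project ID;
     * as in refresh_filesys_projects(), everything after the
     * ID is left alone here.
     */
    int
    main(void)
    {
        const char *sample[] = {
            "# comment line",
            "42:/export/scratch",   /* hypothetical entries */
            "7:/srv/media",
        };
        unsigned int i;

        for (i = 0; i < sizeof(sample)/sizeof(sample[0]); i++) {
            char *idend;
            long prid = strtol(sample[i], &idend, 10);
            if (idend == sample[i])   /* no leading numeric ID */
                continue;
            printf("project ID %ld (entry \"%s\")\n", prid, sample[i]);
        }
        return 0;
    }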
diff --git a/src/pmdas/linux_xfs/filesys.h b/src/pmdas/linux_xfs/filesys.h
new file mode 100644
index 0000000..d1a2ac4
--- /dev/null
+++ b/src/pmdas/linux_xfs/filesys.h
@@ -0,0 +1,108 @@
+/*
+ * XFS Filesystem Cluster
+ *
+ * Copyright (c) 2013 Red Hat.
+ * Copyright (c) 2004,2007 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <sys/vfs.h>
+#include <sys/quota.h>
+
+#define XQM_CMD(x)      (('X'<<8)+(x))  /* note: forms first QCMD argument */
+#define XQM_COMMAND(x)  (((x) & (0xff<<8)) == ('X'<<8))  /* test if for XFS */
+#define XQM_PRJQUOTA    2
+#define Q_XGETQUOTA     XQM_CMD(3)  /* get disk limits and usage */
+#define Q_XGETQSTAT     XQM_CMD(5)  /* get quota subsystem status */
+#define Q_XQUOTASYNC    XQM_CMD(7)  /* delalloc flush, updates dquots */
+
+#define XFS_QUOTA_PDQ_ACCT  (1<<4)  /* project quota accounting */
+#define XFS_QUOTA_PDQ_ENFD  (1<<5)  /* project quota limits enforcement */
+
+#define FS_QSTAT_VERSION    1   /* fs_quota_stat.qs_version */
+
+/*
+ * Some basic information about 'quota files'.
+ */
+typedef struct fs_qfilestat {
+    uint64_t qfs_ino;       /* inode number */
+    uint64_t qfs_nblks;     /* number of 512-byte basic blocks (BBs) */
+    uint32_t qfs_nextents;  /* number of extents */
+} fs_qfilestat_t;
+
+typedef struct fs_quota_stat {
+    char            qs_version;     /* version number for future changes */
+    uint16_t        qs_flags;       /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */
+    char            qs_pad;         /* unused */
+    fs_qfilestat_t  qs_uquota;      /* user quota storage information */
+    fs_qfilestat_t  qs_gquota;      /* group quota storage information */
+    uint32_t        qs_incoredqs;   /* number of dquots incore */
+    int32_t         qs_btimelimit;  /* limit for blks timer */
+    int32_t         qs_itimelimit;  /* limit for inodes timer */
+    int32_t         qs_rtbtimelimit;/* limit for rt blks timer */
+    uint16_t        qs_bwarnlimit;  /* limit for num warnings */
+    uint16_t        qs_iwarnlimit;  /* limit for num warnings */
+} fs_quota_stat_t;
+
+#define FS_DQUOT_VERSION    1   /* fs_disk_quota.d_version */
+typedef struct fs_disk_quota {
+    char     d_version;      /* version of this structure */
+    char     d_flags;        /* XFS_{USER,PROJ,GROUP}_QUOTA */
+    uint16_t d_fieldmask;    /* field specifier */
+    uint32_t d_id;           /* user, project, or group ID */
+    uint64_t d_blk_hardlimit;/* absolute limit on disk blks */
+    uint64_t d_blk_softlimit;/* preferred limit on disk blks */
+    uint64_t d_ino_hardlimit;/* maximum # allocated inodes */
+    uint64_t d_ino_softlimit;/* preferred inode limit */
+    uint64_t d_bcount;       /* # disk blocks owned by the user */
+    uint64_t d_icount;       /* # inodes owned by the user */
+    int32_t  d_itimer;       /* zero if within inode limits */
+    int32_t  d_btimer;       /* similar to above; for disk blocks */
+    uint16_t d_iwarns;       /* # warnings issued wrt num inodes */
+    uint16_t d_bwarns;       /* # warnings issued wrt disk blocks */
+    int32_t  d_padding2;     /* padding2 - for future use */
+    uint64_t d_rtb_hardlimit;/* absolute limit on realtime blks */
+    uint64_t d_rtb_softlimit;/* preferred limit on RT disk blks */
+    uint64_t d_rtbcount;     /* # realtime blocks owned */
+    int32_t  d_rtbtimer;     /* similar to above; for RT disk blks */
+    uint16_t d_rtbwarns;     /* # warnings issued wrt RT disk blks */
+    int16_t  d_padding3;     /* padding3 - for future use */
+    char     d_padding4[8];  /* yet more padding */
+} fs_disk_quota_t;
+
+typedef struct project {
+    int32_t  space_time_left;   /* seconds */
+    int32_t  files_time_left;   /* seconds */
+    uint64_t space_hard;        /* blocks */
+    uint64_t space_soft;        /* blocks */
+    uint64_t space_used;        /* blocks */
+    uint64_t files_hard;
+    uint64_t files_soft;
+    uint64_t files_used;
+} project_t;
+
+/* Values for flags in filesys_t */
+#define FSF_FETCHED         (1U << 0)
+#define FSF_QUOT_PROJ_ACC   (1U << 1)
+#define FSF_QUOT_PROJ_ENF   (1U << 2)
+
+typedef struct filesys {
+    int           id;
+    unsigned int  flags;
+    char          *device;
+    char          *path;
+    char          *options;
+    struct statfs stats;
+} filesys_t;
+
+extern int refresh_filesys(pmInDom, pmInDom);
+extern char *scan_filesys_options(const char *, const char *);
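For context on the Q_XGETQSTAT plumbing these declarations support, a minimal sketch of querying project-quota state on one device; it assumes the Linux quotactl(2) call, repeats the needed defines so it stands alone, and "/dev/sda3" is a hypothetical XFS block device:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/quota.h>      /* quotactl(2), QCMD() */

    /* Subset of the header's definitions, repeated for a standalone build */
    #define XQM_CMD(x)          (('X'<<8)+(x))
    #define XQM_PRJQUOTA        2
    #define Q_XGETQSTAT         XQM_CMD(5)
    #define XFS_QUOTA_PDQ_ACCT  (1<<4)
    #define XFS_QUOTA_PDQ_ENFD  (1<<5)

    struct xqstat {             /* only qs_flags is examined here */
        char     qs_version;
        uint16_t qs_flags;
        char     qs_pad;
        char     qs_rest[124];  /* padded well past the real struct size */
    };

    int
    main(void)
    {
        struct xqstat s;

        if (quotactl(QCMD(Q_XGETQSTAT, XQM_PRJQUOTA),
                     "/dev/sda3", 0, (void *)&s) < 0) {
            perror("quotactl");
            return 1;
        }
        printf("project accounting=%d enforcement=%d\n",
               (s.qs_flags & XFS_QUOTA_PDQ_ACCT) != 0,
               (s.qs_flags & XFS_QUOTA_PDQ_ENFD) != 0);
        return 0;
    }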
diff --git a/src/pmdas/linux_xfs/help b/src/pmdas/linux_xfs/help
new file mode 100644
index 0000000..852165c
--- /dev/null
+++ b/src/pmdas/linux_xfs/help
@@ -0,0 +1,469 @@
+#
+# Copyright (c) 2013 Red Hat.
+# Copyright (c) 2000,2004-2008 Silicon Graphics, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# Linux XFS PMDA help file in the ASCII format
+#
+# lines beginning with a # are ignored
+# lines beginning @ introduce a new entry of the form
+#    @ metric_name oneline-text
+#    help text goes
+#    here over multiple lines
+#    ...
+#
+# the metric_name is decoded against the default PMNS -- as a special case,
+# a name of the form NNN.MM (for numeric NNN and MM) is interpreted as an
+# instance domain identification, and the text describes the instance domain
+#
+# blank lines before the @ line are ignored
+#
+
+@ xfs.allocs.alloc_extent XFS extents allocated
+Number of file system extents allocated over all XFS filesystems
+@ xfs.allocs.alloc_block XFS blocks allocated
+Number of file system blocks allocated over all XFS filesystems
+@ xfs.allocs.free_extent XFS extents freed
+Number of file system extents freed over all XFS filesystems
+@ xfs.allocs.free_block XFS blocks freed
+Number of file system blocks freed over all XFS filesystems
+
+@ xfs.alloc_btree.lookup lookups in XFS alloc btrees
+Number of lookup operations in XFS filesystem allocation btrees
+@ xfs.alloc_btree.compare compares in XFS alloc btrees
+Number of compares in XFS filesystem allocation btree lookups
+@ xfs.alloc_btree.insrec insertions in XFS alloc btrees
+Number of extent records inserted into XFS filesystem allocation btrees
+@ xfs.alloc_btree.delrec deletions in XFS alloc btrees
+Number of extent records deleted from XFS filesystem allocation btrees
+
+@ xfs.block_map.read_ops block map read ops in XFS
+Number of block map for read operations performed on XFS files
+@ xfs.block_map.write_ops block map write ops in XFS
+Number of block map for write operations performed on XFS files
+@ xfs.block_map.unmap block unmap ops in XFS
+Number of block unmap (delete) operations performed on XFS files
+@ xfs.block_map.add_exlist extent list add ops in XFS
+Number of extent list insertion operations for XFS files
+@ xfs.block_map.del_exlist extent list delete ops in XFS
+Number of extent list deletion operations for XFS files
+@ xfs.block_map.look_exlist extent list lookup ops in XFS
+Number of extent list lookup operations for XFS files
+@ xfs.block_map.cmp_exlist extent list compare ops in XFS
+Number of extent list comparisons in XFS extent list lookups
+
+@ xfs.bmap_btree.lookup block map btree lookup ops in XFS
+Number of block map btree lookup operations on XFS files
+@ xfs.bmap_btree.compare block map btree compare ops in XFS
+Number of block map btree compare operations in XFS block map lookups
+@ xfs.bmap_btree.insrec block map btree insert ops in XFS
+Number of block map btree records inserted for XFS files
+@ xfs.bmap_btree.delrec block map btree delete ops in XFS
+Number of block map btree records deleted for XFS files
+
+@ xfs.dir_ops.lookup number of file name directory lookups
+This is a count of the number of file name directory lookups in XFS
+filesystems. It counts only those lookups which miss in the operating
+system's directory name lookup cache and must search the real directory
+structure for the name in question. The count is incremented once for
+each level of a pathname search that results in a directory lookup.
+
+@ xfs.dir_ops.create number of directory entry creation operations
+This is the number of times a new directory entry was created in XFS
+filesystems. Each time that a new file, directory, link, symbolic link,
+or special file is created in the directory hierarchy the count is
+incremented.
+
+@ xfs.dir_ops.remove number of directory entry remove operations
+This is the number of times an existing directory entry was removed in
+XFS filesystems. Each time that a file, directory, link, symbolic link,
+or special file is removed from the directory hierarchy the count is
+incremented.
+
+@ xfs.dir_ops.getdents number of times the getdents operation is performed
+This is the number of times the XFS directory getdents operation was
+performed. The getdents operation is used by programs to read the
+contents of directories in a file system independent fashion. This
+count corresponds exactly to the number of times the getdents(2) system
+call was successfully used on an XFS directory.
+
+@ xfs.transactions.sync number of synchronous meta-data transactions performed
+This is the number of meta-data transactions which waited to be
+committed to the on-disk log before allowing the process performing the
+transaction to continue. These transactions are slower and more
+expensive than asynchronous transactions, because they force the in
+memory log buffers to be forced to disk more often and they wait for
+the completion of the log buffer writes.
+
+@ xfs.transactions.async number of asynchronous meta-data transactions performed
+This is the number of meta-data transactions which did not wait to be
+committed to the on-disk log before allowing the process performing the
+transaction to continue. These transactions are faster and more
+efficient than synchronous transactions, because they commit their data
+to the in memory log buffers without forcing those buffers to be
+written to disk. This allows multiple asynchronous transactions to be
+committed to disk in a single log buffer write. Most transactions used
+in XFS file systems are asynchronous.
+
+@ xfs.transactions.empty number of meta-data transactions which committed without changing anything
+This is the number of meta-data transactions which did not actually
+change anything. These are transactions which were started for some
+purpose, but in the end it turned out that no change was necessary.
+
+@ xfs.inode_ops.ig_attempts number of in memory inode lookup operations
+This is the number of times the operating system looked for an XFS
+inode in the inode cache. Whether the inode was found in the cache or
+needed to be read in from the disk is not indicated here, but this can
+be computed from the ig_found and ig_missed counts.
+
+@ xfs.inode_ops.ig_found number of successful in memory inode lookup operations
+This is the number of times the operating system looked for an XFS
+inode in the inode cache and found it. The closer this count is to the
+ig_attempts count the better the inode cache is performing.
+
+@ xfs.inode_ops.ig_frecycle number of just missed in memory inode lookup operations
+This is the number of times the operating system looked for an XFS
+inode in the inode cache and saw that it was there but was unable to
+use the in memory inode because it was being recycled by another
+process.
+
+@ xfs.inode_ops.ig_missed number of failed in memory inode lookup operations
+This is the number of times the operating system looked for an XFS
+inode in the inode cache and the inode was not there. The further this
+count is from the ig_attempts count the better.
+
+@ xfs.inode_ops.ig_dup number of inode cache insertions that fail because the inode is there
+This is the number of times the operating system looked for an XFS
+inode in the inode cache and found that it was not there but upon
+attempting to add the inode to the cache found that another process had
+already inserted it.
+
+@ xfs.inode_ops.ig_reclaims number of in memory inode recycle operations
+This is the number of times the operating system recycled an XFS inode
+from the inode cache in order to use the memory for that inode for
+another purpose. Inodes are recycled in order to keep the inode cache
+from growing without bound. If the reclaim rate is high it may be
+beneficial to raise the vnode_free_ratio kernel tunable variable to
+increase the size of inode cache.
+
+@ xfs.inode_ops.ig_attrchg number of inode attribute change operations
+This is the number of times the operating system explicitly changed the
+attributes of an XFS inode. For example, this could be to change the
+inode's owner, the inode's size, or the inode's timestamps.
+
+@ xfs.log.writes number of buffer writes going to the disk from the log
+This variable counts the number of log buffer writes going to the
+physical log partitions of all XFS filesystems. Log data traffic is
+proportional to the level of meta-data updating. Log buffer writes get
+generated when they fill up or external syncs occur.
+
+@ xfs.log.blocks write throughput to the physical XFS log
+This variable counts the number of Kbytes of information being written
+to the physical log partitions of all XFS filesystems. Log data traffic
+is proportional to the level of meta-data updating. The rate with which
+log data gets written depends on the size of internal log buffers and
+disk write speed. Therefore, filesystems with very high meta-data
+updating may need to stripe the log partition or put the log partition
+on a separate drive.
+
+@ xfs.log.write_ratio ratio of count of XFS log blocks written to log writes
+The ratio of log blocks written to log writes. If block count isn't a
+"reasonable" multiple of writes, then many small log writes are being
+performed - this is suboptimal. Perfection is 64. Fine-grain control
+can be obtained when this metric is used in conjunction with pmstore(1)
+and the xfs.control.reset metric.
+
+@ xfs.log.noiclogs count of failures for immediate get of buffered/internal
+This variable keeps track of times when a logged transaction can not
+get any log buffer space. When this occurs, all of the internal log
+buffers are busy flushing their data to the physical on-disk log.
+
+@ xfs.log.force value from xs_log_force field of struct xfsstats
+The number of times the in-core log is forced to disk. It is
+equivalent to the number of successful calls to the function
+xfs_log_force().
+
+@ xfs.log.force_sleep value from xs_log_force_sleep field of struct xfsstats
+This metric is exported from the xs_log_force_sleep field of struct xfsstats
+
+@ xfs.log_tail.try_logspace value from xs_try_logspace field of struct xfsstats
+This metric is exported from the xs_try_logspace field of struct xfsstats
+
+@ xfs.log_tail.sleep_logspace value from xs_sleep_logspace field of struct xfsstats
+This metric is exported from the xs_sleep_logspace field of struct xfsstats
+
+@ xfs.log_tail.push_ail.pushes number of times the AIL tail is moved forward
+The number of times the tail of the AIL is moved forward. It is
+equivalent to the number of successful calls to the function
+xfs_trans_push_ail().
+
+@ xfs.log_tail.push_ail.success value from xs_push_ail_success field of struct xfsstats
+@ xfs.log_tail.push_ail.pushbuf value from xs_push_ail_pushbuf field of struct xfsstats
+@ xfs.log_tail.push_ail.pinned value from xs_push_ail_pinned field of struct xfsstats
+@ xfs.log_tail.push_ail.locked value from xs_push_ail_locked field of struct xfsstats
+@ xfs.log_tail.push_ail.flushing value from xs_push_ail_flushing field of struct xfsstats
+@ xfs.log_tail.push_ail.restarts value from xs_push_ail_restarts field of struct xfsstats
+@ xfs.log_tail.push_ail.flush value from xs_push_ail_flush field of struct xfsstats
+
+@ xfs.xstrat.bytes number of bytes of data processed by the XFS daemons
+This is the number of bytes of file data flushed out by the XFS
+flushing daemons.
+
+@ xfs.xstrat.quick number of buffers processed by the XFS daemons written to contiguous space on disk
+This is the number of buffers flushed out by the XFS flushing daemons
+which are written to contiguous space on disk. The buffers handled by
+the XFS daemons are delayed allocation buffers, so this count gives an
+indication of the success of the XFS daemons in allocating contiguous
+disk space for the data being flushed to disk.
+
+@ xfs.xstrat.split number of buffers processed by the XFS daemons written to non-contiguous space on disk
+This is the number of buffers flushed out by the XFS flushing daemons
+which are written to non-contiguous space on disk. The buffers handled
+by the XFS daemons are delayed allocation buffers, so this count gives
+an indication of the failure of the XFS daemons in allocating
+contiguous disk space for the data being flushed to disk. Large values
+in this counter indicate that the file system has become fragmented.
+
+@ xfs.write number of XFS file system write operations
+This is the number of write(2) system calls made to files in
+XFS file systems.
+
+@ xfs.write_bytes number of bytes written in XFS file system write operations
+This is the number of bytes written via write(2) system calls to files
+in XFS file systems. It can be used in conjunction with the write_calls
+count to calculate the average size of the write operations to files in
+XFS file systems.
+
+@ xfs.read number of XFS file system read operations
+This is the number of read(2) system calls made to files in XFS file
+systems.
+
+@ xfs.read_bytes number of bytes read in XFS file system read operations
+This is the number of bytes read via read(2) system calls to files in
+XFS file systems. It can be used in conjunction with the read_calls
+count to calculate the average size of the read operations to files in
+XFS file systems.
+
+@ xfs.attr.get number of "get" operations on XFS extended file attributes
+The number of "get" operations performed on extended file attributes
+within XFS filesystems. The "get" operation retrieves the value of an
+extended attribute.
+
+@ xfs.attr.set number of "set" operations on XFS extended file attributes
+The number of "set" operations performed on extended file attributes
+within XFS filesystems. The "set" operation creates and sets the value
+of an extended attribute.
+
+@ xfs.attr.remove number of "remove" operations on XFS extended file attributes
+The number of "remove" operations performed on extended file attributes
+within XFS filesystems. The "remove" operation deletes an extended
+attribute.
+
+@ xfs.attr.list number of "list" operations on XFS extended file attributes
+The number of "list" operations performed on extended file attributes
+within XFS filesystems. The "list" operation retrieves the set of
+extended attributes associated with a file.
+
+@ xfs.quota.reclaims value from xs_qm_dqreclaims field of struct xfsstats
+@ xfs.quota.reclaim_misses value from xs_qm_dqreclaim_misses field of struct xfsstats
+@ xfs.quota.dquot_dups value from xs_qm_dquot_dups field of struct xfsstats
+@ xfs.quota.cachemisses value from xs_qm_dqcachemisses field of struct xfsstats
+@ xfs.quota.cachehits value from xs_qm_dqcachehits field of struct xfsstats
+@ xfs.quota.wants value from xs_qm_dqwants field of struct xfsstats
+@ xfs.quota.shake_reclaims value from xs_qm_dqshake_reclaims field of struct xfsstats
+@ xfs.quota.inact_reclaims value from xs_qm_dqinact_reclaims field of struct xfsstats
+
+@ xfs.iflush_count the number of calls to xfs_iflush
+This is the number of calls to xfs_iflush which gets called when an
+inode is being flushed (such as by bdflush or tail pushing).
+xfs_iflush searches for other inodes in the same cluster which are
+dirty and flushable.
+
+@ xfs.icluster_flushcnt value from xs_icluster_flushcnt field of struct xfsstats
+
+@ xfs.icluster_flushinode number of flushes of only one inode in cluster
+This is the number of times that the inode clustering was not able to
+flush anything but the one inode it was called with.
+
+@ xfs.buffer.get number of request buffer calls
+@ xfs.buffer.create number of buffers created
+@ xfs.buffer.get_locked number of requests for a locked buffer which succeeded
+@ xfs.buffer.get_locked_waited number of requests for a locked buffer which waited
+@ xfs.buffer.miss_locked number of requests for a locked buffer which failed due to no buffer
+@ xfs.buffer.busy_locked number of non-blocking requests for a locked buffer which failed
+@ xfs.buffer.page_retries number of retry attempts when allocating a page for insertion in a buffer
+@ xfs.buffer.page_found number of hits in the page cache when looking for a page
+@ xfs.buffer.get_read number of buffer get calls requiring immediate device reads
+@ xfs.vnodes.active number of vnodes not on free lists
+@ xfs.vnodes.alloc number of times vn_alloc called
+@ xfs.vnodes.get number of times vn_get called
+@ xfs.vnodes.hold number of times vn_hold called
+@ xfs.vnodes.rele number of times vn_rele called
+@ xfs.vnodes.reclaim number of times vn_reclaim called
+@ xfs.vnodes.remove number of times vn_remove called
+@ xfs.vnodes.free number of times vn_free called
+@ xfs.control.reset reset the values of all XFS metrics to zero
+
+@ quota.state.project.accounting 1 indicates quota accounting enabled, else 0
+@ quota.state.project.enforcement 1 indicates quotas enforced, else 0
+@ quota.project.space.hard hard limit for this project and filesys in Kbytes
+@ quota.project.space.soft soft limit for this project and filesys in Kbytes
+@ quota.project.space.used space used for this project and filesys in Kbytes
+@ quota.project.space.time_left when soft limit is exceeded, seconds until it is enacted
+@ quota.project.files.hard file count hard limit for this project and filesys
+@ quota.project.files.soft file count soft limit for this project and filesys
+@ quota.project.files.used file count for this project and filesys
+@ quota.project.files.time_left when soft limit is exceeded, seconds until it is enacted
+
+@ xfs.btree.alloc_blocks.lookup
+Number of free-space-by-block-number btree record lookups
+@ xfs.btree.alloc_blocks.compare
+Number of free-space-by-block-number btree record compares
+@ xfs.btree.alloc_blocks.insrec
+Number of free-space-by-block-number btree insert record operations executed
+@ xfs.btree.alloc_blocks.delrec
+Number of free-space-by-block-number btree delete record operations executed
+@ xfs.btree.alloc_blocks.newroot
+Number of times a new level is added to a free-space-by-block-number btree
+@ xfs.btree.alloc_blocks.killroot
+Number of times a level is removed from a free-space-by-block-number btree
+@ xfs.btree.alloc_blocks.increment
+Number of times a cursor has been moved forward one free-space-by-block-number
+btree record
+@ xfs.btree.alloc_blocks.decrement
+Number of times a cursor has been moved backward one free-space-by-block-number
+btree record
+@ xfs.btree.alloc_blocks.lshift
+Left shift block operations to make space for a new free-space-by-block-number
+btree record
+@ xfs.btree.alloc_blocks.rshift
+Right shift block operations to make space for a new free-space-by-block-number
+btree record
+@ xfs.btree.alloc_blocks.split
+Split block operations to make space for a new free-space-by-block-number
+btree record
+@ xfs.btree.alloc_blocks.join
+Merge block operations when deleting free-space-by-block-number btree records
+@ xfs.btree.alloc_blocks.alloc
+Btree block allocations during free-space-by-block-number btree operations
+@ xfs.btree.alloc_blocks.free
+Btree blocks freed during free-space-by-block-number btree operations
+@ xfs.btree.alloc_blocks.moves
+Records moved inside blocks during free-space-by-block-number btree operations
+
+@ xfs.btree.alloc_contig.lookup
+Number of free-space-by-size btree record lookups
+@ xfs.btree.alloc_contig.compare
+Number of free-space-by-size btree record compares
+@ xfs.btree.alloc_contig.insrec
+Number of free-space-by-size btree insert record operations executed
+@ xfs.btree.alloc_contig.delrec
+Number of free-space-by-size btree delete record operations executed
+@ xfs.btree.alloc_contig.newroot
+Number of times a new level is added to a free-space-by-size btree
+@ xfs.btree.alloc_contig.killroot
+Number of times a level is removed from a free-space-by-size btree
+@ xfs.btree.alloc_contig.increment
+Number of times a free-space-by-size btree cursor has been moved forward
+one record
+@ xfs.btree.alloc_contig.decrement
+Number of times a free-space-by-size btree cursor has been moved backward
+one record
+@ xfs.btree.alloc_contig.lshift
+Left shift block operations to make space for a new free-space-by-size
+btree record
+@ xfs.btree.alloc_contig.rshift
+Right shift block operations to make space for a new free-space-by-size
+btree record
+@ xfs.btree.alloc_contig.split
+Split block operations to make space for a new free-space-by-size btree
+record
+@ xfs.btree.alloc_contig.join
+Merge block operations when deleting free-space-by-size btree records
+@ xfs.btree.alloc_contig.alloc
+Btree block allocations during free-space-by-size btree operations
+@ xfs.btree.alloc_contig.free
+Btree blocks freed during free-space-by-size btree operations
+@ xfs.btree.alloc_contig.moves
+Records moved inside blocks during free-space-by-size btree operations
+
+@ xfs.btree.block_map.lookup
+Number of inode-block-map/extent btree record lookups
+@ xfs.btree.block_map.compare
+Number of inode-block-map/extent btree record compares
+@ xfs.btree.block_map.insrec
+Number of inode-block-map/extent btree insert record operations executed
+@ xfs.btree.block_map.delrec
+Number of inode-block-map/extent btree delete record operations executed
+@ xfs.btree.block_map.newroot
+Number of times a new level is added to an inode-block-map/extent btree
+@ xfs.btree.block_map.killroot
+Number of times a level is removed from an inode-block-map/extent btree
+@ xfs.btree.block_map.increment
+Number of times an inode-block-map/extent btree cursor has been moved
+forward one record
+@ xfs.btree.block_map.decrement
+Number of times an inode-block-map/extent btree cursor has been moved
+backward one record
+@ xfs.btree.block_map.lshift
+Left shift block operations to make space for a new inode-block-map/extent
+btree record
+@ xfs.btree.block_map.rshift
+Right shift block operations to make space for a new inode-block-map/extent
+btree record
+@ xfs.btree.block_map.split
+Split block operations to make space for a new inode-block-map/extent
+btree record
+@ xfs.btree.block_map.join
+Merge block operations when deleting inode-block-map/extent btree records
+@ xfs.btree.block_map.alloc
+Btree block allocations during inode-block-map/extent btree operations
+@ xfs.btree.block_map.free
+Btree blocks freed during inode-block-map/extent btree operations
+@ xfs.btree.block_map.moves
+Records moved inside blocks during inode-block-map/extent btree operations
+
+@ xfs.btree.inode.lookup
+Number of inode-allocation btree record lookups
+@ xfs.btree.inode.compare
+Number of inode-allocation btree record compares
+@ xfs.btree.inode.insrec
+Number of inode-allocation btree insert record operations executed
+@ xfs.btree.inode.delrec
+Number of inode-allocation btree delete record operations executed
+@ xfs.btree.inode.newroot
+Number of times a new level is added to an inode-allocation btree
+@ xfs.btree.inode.killroot
+Number of times a level is removed from an inode-allocation btree
+@ xfs.btree.inode.increment
+Number of times a cursor has been moved forward one inode-allocation
+btree record
+@ xfs.btree.inode.decrement
+Number of times a cursor has been moved backward one inode-allocation
+btree record
+@ xfs.btree.inode.lshift
+Left shift block operations to make space for a new inode-allocation
+btree record
+@ xfs.btree.inode.rshift
+Right shift block operations to make space for a new inode-allocation
+btree record
+@ xfs.btree.inode.split
+Split block operations to make space for a new inode-allocation btree record
+@ xfs.btree.inode.join
+Merge block operations when deleting inode-allocation btree records
+@ xfs.btree.inode.alloc
+Btree block allocations during inode-allocation btree operations
+@ xfs.btree.inode.free
+Btree blocks freed during inode-allocation btree operations
+@ xfs.btree.inode.moves
+Records moved inside blocks during inode-allocation btree operations
+
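As the read_bytes/write_bytes help text above suggests, average I/O size falls out of a ratio of counter deltas. A minimal arithmetic sketch with hypothetical sampled values (plain C, not a PMAPI fetch):

    #include <stdio.h>
    #include <stdint.h>

    /* Average XFS read size between two samples, from deltas of the
     * xfs.read (operation count) and xfs.read_bytes counters.
     */
    int
    main(void)
    {
        uint64_t read_prev = 11000, read_now = 15000;           /* xfs.read */
        uint64_t bytes_prev = 88000000, bytes_now = 120000000;  /* xfs.read_bytes */
        uint64_t calls = read_now - read_prev;

        if (calls)
            printf("average read size: %.1f bytes\n",
                   (double)(bytes_now - bytes_prev) / calls);
        return 0;
    }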
diff --git a/src/pmdas/linux_xfs/indom.h b/src/pmdas/linux_xfs/indom.h
new file mode 100644
index 0000000..a2f51d1
--- /dev/null
+++ b/src/pmdas/linux_xfs/indom.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013 Red Hat, Inc. All Rights Reserved.
+ * Copyright (c) 2005,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _INDOM_H
+#define _INDOM_H
+
+/*
+ * indom serial numbers ... to manage the indom migration after the
+ * linux -> linux + xfs PMDAs split, these need to match the enum
+ * assigned values for *_INDOM from the linux PMDA.  Consequently,
+ * the xfs indom table is sparse.
+ */
+#define FILESYS_INDOM    5   /* mounted filesystems */
+#define QUOTA_PRJ_INDOM  16  /* project quota */
+
+#define MIN_INDOM        5   /* first indom number we use here */
+#define NUM_INDOMS       17  /* one more than highest indom number used */
+
+#endif /* _INDOM_H */
diff --git a/src/pmdas/linux_xfs/linux_xfs_migrate.conf b/src/pmdas/linux_xfs/linux_xfs_migrate.conf
new file mode 100644
index 0000000..59cbb64
--- /dev/null
+++ b/src/pmdas/linux_xfs/linux_xfs_migrate.conf
@@ -0,0 +1,16 @@
+#
+# Copyright 2013 Red Hat.
+#
+# pmlogrewrite configuration for migrating archives containing XFS metrics
+# that were captured prior to the XFS PMDA split-off from the Linux PMDA.
+# Basically, the PMID domain changed from 60 (linux) to 11 (xfs) but all
+# cluster and item numbers remain unchanged.
+#
+
+#
+# Migrate the domain field of the metric and indom identifiers
+#
+indom 60.16 { indom -> duplicate 11.16 }		# need 11.16 and 60.16
+metric 60.16.* { pmid -> 11.*.* }			# CLUSTER_XFS
+metric 60.17.* { pmid -> 11.*.* }			# CLUSTER_XFSBUF
+metric 60.30.* { pmid -> 11.*.* indom -> 11.16 }	# CLUSTER_QUOTA
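Rules like these are applied with pmlogrewrite(1) via its -c option, e.g. "pmlogrewrite -c linux_xfs_migrate.conf oldarchive newarchive", so archives recorded before the split replay with the new domain-11 XFS metric and indom identifiers.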
diff --git a/src/pmdas/linux_xfs/pmda.c b/src/pmdas/linux_xfs/pmda.c
new file mode 100644
index 0000000..a1b937c
--- /dev/null
+++ b/src/pmdas/linux_xfs/pmda.c
@@ -0,0 +1,979 @@
+/*
+ * XFS PMDA
+ *
+ * Copyright (c) 2012-2014 Red Hat.
+ * Copyright (c) 2000,2004,2007-2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "indom.h"
+#include "domain.h"
+#include "clusters.h"
+#include "filesys.h"
+#include "proc_fs_xfs.h"
+
+static int              _isDSO = 1; /* for local contexts */
+static proc_fs_xfs_t    proc_fs_xfs;
+static char             *xfs_statspath = "";
+
+/*
+ * The XFS instance domain table is direct lookup and sparse.
+ * It is initialized in xfs_init(), see below.
+ */
+static pmdaIndom xfs_indomtab[NUM_INDOMS];
+#define INDOM(x) (xfs_indomtab[x].it_indom)
+
+static pmdaMetric xfs_metrictab[] = {
+
+/* xfs.allocs.alloc_extent */
+    { &proc_fs_xfs.xs_allocx,
+      { PMDA_PMID(CLUSTER_XFS,0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.allocs.alloc_block */
+    { &proc_fs_xfs.xs_allocb,
+      { PMDA_PMID(CLUSTER_XFS,1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.allocs.free_extent*/
+    { &proc_fs_xfs.xs_freex,
+      { PMDA_PMID(CLUSTER_XFS,2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.allocs.free_block */
+    { &proc_fs_xfs.xs_freeb,
+      { PMDA_PMID(CLUSTER_XFS,3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.alloc_btree.lookup */
+    { &proc_fs_xfs.xs_abt_lookup,
+      { PMDA_PMID(CLUSTER_XFS,4), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.alloc_btree.compare */
+    { &proc_fs_xfs.xs_abt_compare,
+      { PMDA_PMID(CLUSTER_XFS,5), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.alloc_btree.insrec */
+    { &proc_fs_xfs.xs_abt_insrec,
+      { PMDA_PMID(CLUSTER_XFS,6), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.alloc_btree.delrec */
+    { &proc_fs_xfs.xs_abt_delrec,
+      { PMDA_PMID(CLUSTER_XFS,7), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.block_map.read_ops */
+    { &proc_fs_xfs.xs_blk_mapr,
+      { PMDA_PMID(CLUSTER_XFS,8), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.block_map.write_ops */
+    { &proc_fs_xfs.xs_blk_mapw,
+      { PMDA_PMID(CLUSTER_XFS,9), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.block_map.unmap */
+    { &proc_fs_xfs.xs_blk_unmap,
+      { PMDA_PMID(CLUSTER_XFS,10), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.block_map.add_exlist */
+    { &proc_fs_xfs.xs_add_exlist,
+      { PMDA_PMID(CLUSTER_XFS,11), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.block_map.del_exlist */
+    { &proc_fs_xfs.xs_del_exlist,
+      { PMDA_PMID(CLUSTER_XFS,12), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.block_map.look_exlist */
+    { &proc_fs_xfs.xs_look_exlist,
+      { PMDA_PMID(CLUSTER_XFS,13), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.block_map.cmp_exlist */
+    { &proc_fs_xfs.xs_cmp_exlist,
+      { PMDA_PMID(CLUSTER_XFS,14), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.bmap_btree.lookup */
+    { &proc_fs_xfs.xs_bmbt_lookup,
+      { PMDA_PMID(CLUSTER_XFS,15), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.bmap_btree.compare */
+    { &proc_fs_xfs.xs_bmbt_compare,
+      { PMDA_PMID(CLUSTER_XFS,16), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.bmap_btree.insrec */
+    { &proc_fs_xfs.xs_bmbt_insrec,
+      { PMDA_PMID(CLUSTER_XFS,17), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.bmap_btree.delrec */
+    { &proc_fs_xfs.xs_bmbt_delrec,
+      { PMDA_PMID(CLUSTER_XFS,18), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.dir_ops.lookup */
+    { &proc_fs_xfs.xs_dir_lookup,
+      { PMDA_PMID(CLUSTER_XFS,19), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.dir_ops.create */
+    { &proc_fs_xfs.xs_dir_create,
+      { PMDA_PMID(CLUSTER_XFS,20), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.dir_ops.remove */
+    { &proc_fs_xfs.xs_dir_remove,
+      { PMDA_PMID(CLUSTER_XFS,21), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.dir_ops.getdents */
+    { &proc_fs_xfs.xs_dir_getdents,
+      { PMDA_PMID(CLUSTER_XFS,22), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.transactions.sync */
+    { &proc_fs_xfs.xs_trans_sync,
+      { PMDA_PMID(CLUSTER_XFS,23), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.transactions.async */
+    { &proc_fs_xfs.xs_trans_async,
+      { PMDA_PMID(CLUSTER_XFS,24), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.transactions.empty */
+    { &proc_fs_xfs.xs_trans_empty,
+      { PMDA_PMID(CLUSTER_XFS,25), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.inode_ops.ig_attempts */
+    { &proc_fs_xfs.xs_ig_attempts,
+      { PMDA_PMID(CLUSTER_XFS,26), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.inode_ops.ig_found */
+    { &proc_fs_xfs.xs_ig_found,
+      { PMDA_PMID(CLUSTER_XFS,27), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.inode_ops.ig_frecycle */
+    { &proc_fs_xfs.xs_ig_frecycle,
+      { PMDA_PMID(CLUSTER_XFS,28), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.inode_ops.ig_missed */
+    { &proc_fs_xfs.xs_ig_missed,
+      { PMDA_PMID(CLUSTER_XFS,29), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.inode_ops.ig_dup */
+    { &proc_fs_xfs.xs_ig_dup,
+      { PMDA_PMID(CLUSTER_XFS,30), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.inode_ops.ig_reclaims */
+    { &proc_fs_xfs.xs_ig_reclaims,
+      { PMDA_PMID(CLUSTER_XFS,31), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.inode_ops.ig_attrchg */
+    { &proc_fs_xfs.xs_ig_attrchg,
+      { PMDA_PMID(CLUSTER_XFS,32), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.log.writes */
+    { &proc_fs_xfs.xs_log_writes,
+      { PMDA_PMID(CLUSTER_XFS,33), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log.blocks */
+    { &proc_fs_xfs.xs_log_blocks,
+      { PMDA_PMID(CLUSTER_XFS,34), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, },
+/* xfs.log.noiclogs */
+    { &proc_fs_xfs.xs_log_noiclogs,
+      { PMDA_PMID(CLUSTER_XFS,35), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log.force */
+    { &proc_fs_xfs.xs_log_force,
+      { PMDA_PMID(CLUSTER_XFS,36), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log.force_sleep */
+    { &proc_fs_xfs.xs_log_force_sleep,
+      { PMDA_PMID(CLUSTER_XFS,37), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.log_tail.try_logspace */
+    { &proc_fs_xfs.xs_try_logspace,
+      { PMDA_PMID(CLUSTER_XFS,38), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.sleep_logspace */
+    { &proc_fs_xfs.xs_sleep_logspace,
+      { PMDA_PMID(CLUSTER_XFS,39), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.pushes */
+    { &proc_fs_xfs.xs_push_ail,
+      { PMDA_PMID(CLUSTER_XFS,40), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.success */
+    { &proc_fs_xfs.xs_push_ail_success,
+      { PMDA_PMID(CLUSTER_XFS,41), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.pushbuf */
+    { &proc_fs_xfs.xs_push_ail_pushbuf,
+      { PMDA_PMID(CLUSTER_XFS,42), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.pinned */
+    { &proc_fs_xfs.xs_push_ail_pinned,
+      { PMDA_PMID(CLUSTER_XFS,43), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.locked */
+    { &proc_fs_xfs.xs_push_ail_locked,
+      { PMDA_PMID(CLUSTER_XFS,44), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.flushing */
+    { &proc_fs_xfs.xs_push_ail_flushing,
+      { PMDA_PMID(CLUSTER_XFS,45), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.restarts */
+    { &proc_fs_xfs.xs_push_ail_restarts,
+      { PMDA_PMID(CLUSTER_XFS,46), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.log_tail.push_ail.flush */
+    { &proc_fs_xfs.xs_push_ail_flush,
+      { PMDA_PMID(CLUSTER_XFS,47), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.xstrat.bytes */
+    { &proc_fs_xfs.xpc.xs_xstrat_bytes,
+      { PMDA_PMID(CLUSTER_XFS,48), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+/* xfs.xstrat.quick */
+    { &proc_fs_xfs.xs_xstrat_quick,
+      { PMDA_PMID(CLUSTER_XFS,49), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.xstrat.split */
+    { &proc_fs_xfs.xs_xstrat_split,
+      { PMDA_PMID(CLUSTER_XFS,50), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.write */
+    { &proc_fs_xfs.xs_write_calls,
+      { PMDA_PMID(CLUSTER_XFS,51), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.write_bytes */
+    { &proc_fs_xfs.xpc.xs_write_bytes,
+      { PMDA_PMID(CLUSTER_XFS,52), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+/* xfs.read */
+    { &proc_fs_xfs.xs_read_calls,
+      { PMDA_PMID(CLUSTER_XFS,53), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.read_bytes */
+    { &proc_fs_xfs.xpc.xs_read_bytes,
+      { PMDA_PMID(CLUSTER_XFS,54), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(1,0,0,PM_SPACE_BYTE,0,0) }, },
+
+/* xfs.attr.get */
+    { &proc_fs_xfs.xs_attr_get,
+      { PMDA_PMID(CLUSTER_XFS,55), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.attr.set */
+    { &proc_fs_xfs.xs_attr_set,
+      { PMDA_PMID(CLUSTER_XFS,56), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.attr.remove */
+    { &proc_fs_xfs.xs_attr_remove,
+      { PMDA_PMID(CLUSTER_XFS,57), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.attr.list */
+    { &proc_fs_xfs.xs_attr_list,
+      { PMDA_PMID(CLUSTER_XFS,58), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.quota.reclaims */
+    { &proc_fs_xfs.xs_qm_dqreclaims,
+      { PMDA_PMID(CLUSTER_XFS,59), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.reclaim_misses */
+    { &proc_fs_xfs.xs_qm_dqreclaim_misses,
+      { PMDA_PMID(CLUSTER_XFS,60), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.dquot_dups */
+    { &proc_fs_xfs.xs_qm_dquot_dups,
+      { PMDA_PMID(CLUSTER_XFS,61), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.cachemisses */
+    { &proc_fs_xfs.xs_qm_dqcachemisses,
+      { PMDA_PMID(CLUSTER_XFS,62), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.cachehits */
+    { &proc_fs_xfs.xs_qm_dqcachehits,
+      { PMDA_PMID(CLUSTER_XFS,63), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.wants */
+    { &proc_fs_xfs.xs_qm_dqwants,
+      { PMDA_PMID(CLUSTER_XFS,64), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.shake_reclaims */
+    { &proc_fs_xfs.xs_qm_dqshake_reclaims,
+      { PMDA_PMID(CLUSTER_XFS,65), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.quota.inact_reclaims */
+    { &proc_fs_xfs.xs_qm_dqinact_reclaims,
+      { PMDA_PMID(CLUSTER_XFS,66), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.iflush_count */
+    { &proc_fs_xfs.xs_iflush_count,
+      { PMDA_PMID(CLUSTER_XFS,67), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.icluster_flushcnt */
+    { &proc_fs_xfs.xs_icluster_flushcnt,
+      { PMDA_PMID(CLUSTER_XFS,68), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.icluster_flushinode */
+    { &proc_fs_xfs.xs_icluster_flushinode,
+      { PMDA_PMID(CLUSTER_XFS,69), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+
+/* xfs.buffer.get */
+    { &proc_fs_xfs.xs_buf_get,
+      { PMDA_PMID(CLUSTER_XFSBUF,0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.buffer.create */
+    { &proc_fs_xfs.xs_buf_create,
+      { PMDA_PMID(CLUSTER_XFSBUF,1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.buffer.get_locked */
+    { &proc_fs_xfs.xs_buf_get_locked,
+      { PMDA_PMID(CLUSTER_XFSBUF,2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.buffer.get_locked_waited */
+    { &proc_fs_xfs.xs_buf_get_locked_waited,
+      { PMDA_PMID(CLUSTER_XFSBUF,3), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.buffer.busy_locked */
+    { &proc_fs_xfs.xs_buf_busy_locked,
+      { PMDA_PMID(CLUSTER_XFSBUF,4), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.buffer.miss_locked */
+    { &proc_fs_xfs.xs_buf_miss_locked,
+      { PMDA_PMID(CLUSTER_XFSBUF,5), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+      PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, },
+/* xfs.buffer.page_retries */
+    { &proc_fs_xfs.xs_buf_page_retries,
+      { PMDA_PMID(CLUSTER_XFSBUF,6), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.buffer.page_found */ + { &proc_fs_xfs.xs_buf_page_found, + { PMDA_PMID(CLUSTER_XFSBUF,7), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.buffer.get_read */ + { &proc_fs_xfs.xs_buf_get_read, + { PMDA_PMID(CLUSTER_XFSBUF,8), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, + +/* xfs.vnodes.active */ + { &proc_fs_xfs.vnodes.vn_active, + { PMDA_PMID(CLUSTER_XFS,70), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_INSTANT, + PMDA_PMUNITS(0,0,0,0,0,0) }, }, +/* xfs.vnodes.alloc */ + { &proc_fs_xfs.vnodes.vn_alloc, + { PMDA_PMID(CLUSTER_XFS,71), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.vnodes.get */ + { &proc_fs_xfs.vnodes.vn_get, + { PMDA_PMID(CLUSTER_XFS,72), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.vnodes.hold */ + { &proc_fs_xfs.vnodes.vn_hold, + { PMDA_PMID(CLUSTER_XFS,73), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.vnodes.rele */ + { &proc_fs_xfs.vnodes.vn_rele, + { PMDA_PMID(CLUSTER_XFS,74), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.vnodes.reclaim */ + { &proc_fs_xfs.vnodes.vn_reclaim, + { PMDA_PMID(CLUSTER_XFS,75), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.vnodes.remove */ + { &proc_fs_xfs.vnodes.vn_remove, + { PMDA_PMID(CLUSTER_XFS,76), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.vnodes.free */ + { &proc_fs_xfs.vnodes.vn_free, + { PMDA_PMID(CLUSTER_XFS,77), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, + +/* xfs.log.write_ratio */ + { &proc_fs_xfs.xs_log_write_ratio, + { PMDA_PMID(CLUSTER_XFS,78), PM_TYPE_FLOAT, PM_INDOM_NULL, PM_SEM_INSTANT, + PMDA_PMUNITS(0,0,0,0,0,0) }, }, +/* xfs.control.reset */ + { NULL, + { PMDA_PMID(CLUSTER_XFS,79), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,0,0,0,0,0) }, }, + +/* xfs.btree.alloc_blocks.lookup */ + { &proc_fs_xfs.xs_abtb_2_lookup, + { PMDA_PMID(CLUSTER_XFS,80), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.compare */ + { &proc_fs_xfs.xs_abtb_2_compare, + { PMDA_PMID(CLUSTER_XFS,81), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.insrec */ + { &proc_fs_xfs.xs_abtb_2_insrec, + { PMDA_PMID(CLUSTER_XFS,82), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.delrec */ + { &proc_fs_xfs.xs_abtb_2_delrec, + { PMDA_PMID(CLUSTER_XFS,83), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.newroot */ + { &proc_fs_xfs.xs_abtb_2_newroot, + { PMDA_PMID(CLUSTER_XFS,84), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.killroot */ + { &proc_fs_xfs.xs_abtb_2_killroot, + { PMDA_PMID(CLUSTER_XFS,85), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.increment */ + { &proc_fs_xfs.xs_abtb_2_increment, + { PMDA_PMID(CLUSTER_XFS,86), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.decrement */ + { 
&proc_fs_xfs.xs_abtb_2_decrement, + { PMDA_PMID(CLUSTER_XFS,87), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.lshift */ + { &proc_fs_xfs.xs_abtb_2_lshift, + { PMDA_PMID(CLUSTER_XFS,88), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.rshift */ + { &proc_fs_xfs.xs_abtb_2_rshift, + { PMDA_PMID(CLUSTER_XFS,89), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.split */ + { &proc_fs_xfs.xs_abtb_2_split, + { PMDA_PMID(CLUSTER_XFS,90), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.join */ + { &proc_fs_xfs.xs_abtb_2_join, + { PMDA_PMID(CLUSTER_XFS,91), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.alloc */ + { &proc_fs_xfs.xs_abtb_2_alloc, + { PMDA_PMID(CLUSTER_XFS,92), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.free */ + { &proc_fs_xfs.xs_abtb_2_free, + { PMDA_PMID(CLUSTER_XFS,93), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_blocks.moves */ + { &proc_fs_xfs.xs_abtb_2_moves, + { PMDA_PMID(CLUSTER_XFS,94), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, + +/* xfs.btree.alloc_contig.lookup */ + { &proc_fs_xfs.xs_abtc_2_lookup, + { PMDA_PMID(CLUSTER_XFS,95), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.compare */ + { &proc_fs_xfs.xs_abtc_2_compare, + { PMDA_PMID(CLUSTER_XFS,96), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.insrec */ + { &proc_fs_xfs.xs_abtc_2_insrec, + { PMDA_PMID(CLUSTER_XFS,97), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.delrec */ + { &proc_fs_xfs.xs_abtc_2_delrec, + { PMDA_PMID(CLUSTER_XFS,98), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.newroot */ + { &proc_fs_xfs.xs_abtc_2_newroot, + { PMDA_PMID(CLUSTER_XFS,99), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.killroot */ + { &proc_fs_xfs.xs_abtc_2_killroot, + { PMDA_PMID(CLUSTER_XFS,100), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.increment */ + { &proc_fs_xfs.xs_abtc_2_increment, + { PMDA_PMID(CLUSTER_XFS,101), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.decrement */ + { &proc_fs_xfs.xs_abtc_2_decrement, + { PMDA_PMID(CLUSTER_XFS,102), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.lshift */ + { &proc_fs_xfs.xs_abtc_2_lshift, + { PMDA_PMID(CLUSTER_XFS,103), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.rshift */ + { &proc_fs_xfs.xs_abtc_2_rshift, + { PMDA_PMID(CLUSTER_XFS,104), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.split */ + { &proc_fs_xfs.xs_abtc_2_split, + { PMDA_PMID(CLUSTER_XFS,105), PM_TYPE_U32, 
PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.join */ + { &proc_fs_xfs.xs_abtc_2_join, + { PMDA_PMID(CLUSTER_XFS,106), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.alloc */ + { &proc_fs_xfs.xs_abtc_2_alloc, + { PMDA_PMID(CLUSTER_XFS,107), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.free */ + { &proc_fs_xfs.xs_abtc_2_free, + { PMDA_PMID(CLUSTER_XFS,108), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.alloc_contig.moves */ + { &proc_fs_xfs.xs_abtc_2_moves, + { PMDA_PMID(CLUSTER_XFS,109), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, + +/* xfs.btree.block_map.lookup */ + { &proc_fs_xfs.xs_bmbt_2_lookup, + { PMDA_PMID(CLUSTER_XFS,110), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.compare */ + { &proc_fs_xfs.xs_bmbt_2_compare, + { PMDA_PMID(CLUSTER_XFS,111), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.insrec */ + { &proc_fs_xfs.xs_bmbt_2_insrec, + { PMDA_PMID(CLUSTER_XFS,112), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.delrec */ + { &proc_fs_xfs.xs_bmbt_2_delrec, + { PMDA_PMID(CLUSTER_XFS,113), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.newroot */ + { &proc_fs_xfs.xs_bmbt_2_newroot, + { PMDA_PMID(CLUSTER_XFS,114), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.killroot */ + { &proc_fs_xfs.xs_bmbt_2_killroot, + { PMDA_PMID(CLUSTER_XFS,115), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.increment */ + { &proc_fs_xfs.xs_bmbt_2_increment, + { PMDA_PMID(CLUSTER_XFS,116), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.decrement */ + { &proc_fs_xfs.xs_bmbt_2_decrement, + { PMDA_PMID(CLUSTER_XFS,117), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.lshift */ + { &proc_fs_xfs.xs_bmbt_2_lshift, + { PMDA_PMID(CLUSTER_XFS,118), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.rshift */ + { &proc_fs_xfs.xs_bmbt_2_rshift, + { PMDA_PMID(CLUSTER_XFS,119), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.split */ + { &proc_fs_xfs.xs_bmbt_2_split, + { PMDA_PMID(CLUSTER_XFS,120), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.join */ + { &proc_fs_xfs.xs_bmbt_2_join, + { PMDA_PMID(CLUSTER_XFS,121), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.alloc */ + { &proc_fs_xfs.xs_bmbt_2_alloc, + { PMDA_PMID(CLUSTER_XFS,122), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.free */ + { &proc_fs_xfs.xs_bmbt_2_free, + { PMDA_PMID(CLUSTER_XFS,123), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.block_map.moves */ + { 
&proc_fs_xfs.xs_bmbt_2_moves, + { PMDA_PMID(CLUSTER_XFS,124), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, + +/* xfs.btree.inode.lookup */ + { &proc_fs_xfs.xs_ibt_2_lookup, + { PMDA_PMID(CLUSTER_XFS,125), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.compare */ + { &proc_fs_xfs.xs_ibt_2_compare, + { PMDA_PMID(CLUSTER_XFS,126), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.insrec */ + { &proc_fs_xfs.xs_ibt_2_insrec, + { PMDA_PMID(CLUSTER_XFS,127), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.delrec */ + { &proc_fs_xfs.xs_ibt_2_delrec, + { PMDA_PMID(CLUSTER_XFS,128), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.newroot */ + { &proc_fs_xfs.xs_ibt_2_newroot, + { PMDA_PMID(CLUSTER_XFS,129), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.killroot */ + { &proc_fs_xfs.xs_ibt_2_killroot, + { PMDA_PMID(CLUSTER_XFS,130), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.increment */ + { &proc_fs_xfs.xs_ibt_2_increment, + { PMDA_PMID(CLUSTER_XFS,131), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.decrement */ + { &proc_fs_xfs.xs_ibt_2_decrement, + { PMDA_PMID(CLUSTER_XFS,132), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.lshift */ + { &proc_fs_xfs.xs_ibt_2_lshift, + { PMDA_PMID(CLUSTER_XFS,133), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.rshift */ + { &proc_fs_xfs.xs_ibt_2_rshift, + { PMDA_PMID(CLUSTER_XFS,134), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.split */ + { &proc_fs_xfs.xs_ibt_2_split, + { PMDA_PMID(CLUSTER_XFS,135), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.join */ + { &proc_fs_xfs.xs_ibt_2_join, + { PMDA_PMID(CLUSTER_XFS,136), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.alloc */ + { &proc_fs_xfs.xs_ibt_2_alloc, + { PMDA_PMID(CLUSTER_XFS,137), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.free */ + { &proc_fs_xfs.xs_ibt_2_free, + { PMDA_PMID(CLUSTER_XFS,138), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, +/* xfs.btree.inode.moves */ + { &proc_fs_xfs.xs_ibt_2_moves, + { PMDA_PMID(CLUSTER_XFS,139), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) }, }, + +/* quota.state.project.accounting */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,0), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,0,0,0,0,0) }, }, +/* quota.state.project.enforcement */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,1), PM_TYPE_U32, FILESYS_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,0,0,0,0,0) }, }, +/* quota.project.space.hard */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,6), PM_TYPE_U64, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, }, +/* quota.project.space.soft */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,7), PM_TYPE_U64, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + 
PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, }, +/* quota.project.space.used */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,8), PM_TYPE_U64, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(1,0,0,PM_SPACE_KBYTE,0,0) }, }, +/* quota.project.space.time_left */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,9), PM_TYPE_32, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,1,0,0,PM_TIME_SEC,0) }, }, +/* quota.project.files.hard */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,10), PM_TYPE_U64, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } }, +/* quota.project.files.soft */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,11), PM_TYPE_U64, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } }, +/* quota.project.files.used */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,12), PM_TYPE_U64, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,0,1,0,0,PM_COUNT_ONE) } }, +/* quota.project.files.time_left */ + { NULL, + { PMDA_PMID(CLUSTER_QUOTA,13), PM_TYPE_32, QUOTA_PRJ_INDOM, PM_SEM_DISCRETE, + PMDA_PMUNITS(0,1,0,0,PM_TIME_SEC,0) }, }, +}; + +FILE * +xfs_statsfile(const char *path, const char *mode) +{ + char buffer[MAXPATHLEN]; + + snprintf(buffer, sizeof(buffer), "%s%s", xfs_statspath, path); + buffer[MAXPATHLEN-1] = '\0'; + return fopen(buffer, mode); +} + +static void +xfs_refresh(pmdaExt *pmda, int *need_refresh) +{ + if (need_refresh[CLUSTER_QUOTA]) + refresh_filesys(INDOM(FILESYS_INDOM), INDOM(QUOTA_PRJ_INDOM)); + if (need_refresh[CLUSTER_XFS] || need_refresh[CLUSTER_XFSBUF]) + refresh_proc_fs_xfs(&proc_fs_xfs); +} + +static int +xfs_instance(pmInDom indom, int inst, char *name, __pmInResult **result, pmdaExt *pmda) +{ + __pmInDom_int *indomp = (__pmInDom_int *)&indom; + int need_refresh[NUM_CLUSTERS] = { 0 }; + + if (indomp->serial == FILESYS_INDOM || indomp->serial == QUOTA_PRJ_INDOM) + need_refresh[CLUSTER_QUOTA]++; + xfs_refresh(pmda, need_refresh); + return pmdaInstance(indom, inst, name, result, pmda); +} + +static int +xfs_fetchCallBack(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom) +{ + __pmID_int *idp = (__pmID_int *)&(mdesc->m_desc.pmid); + struct filesys *fs; + int sts; + + if (mdesc->m_user != NULL) { + if ((idp->cluster == CLUSTER_XFS || idp->cluster == CLUSTER_XFSBUF) && + proc_fs_xfs.errcode != 0) { + /* no values available for XFS metrics */ + return 0; + } + + switch (mdesc->m_desc.type) { + case PM_TYPE_32: + atom->l = *(__int32_t *)mdesc->m_user; + break; + case PM_TYPE_U32: + atom->ul = *(__uint32_t *)mdesc->m_user; + break; + case PM_TYPE_64: + atom->ll = *(__int64_t *)mdesc->m_user; + break; + case PM_TYPE_U64: + atom->ull = *(__uint64_t *)mdesc->m_user; + break; + case PM_TYPE_FLOAT: + atom->f = *(float *)mdesc->m_user; + break; + case PM_TYPE_DOUBLE: + atom->d = *(double *)mdesc->m_user; + break; + case PM_TYPE_STRING: + atom->cp = (char *)mdesc->m_user; + break; + default: + return 0; + } + } + else + switch (idp->cluster) { + + case CLUSTER_XFS: + switch (idp->item) { + case 79: /* xfs.control.reset */ + atom->ul = 0; + break; + default: + return PM_ERR_PMID; + } + break; + + case CLUSTER_QUOTA: + if (idp->item <= 5) { + sts = pmdaCacheLookup(INDOM(FILESYS_INDOM), inst, NULL, + (void **)&fs); + if (sts < 0) + return sts; + if (sts != PMDA_CACHE_ACTIVE) + return PM_ERR_INST; + switch (idp->item) { + case 0: /* quota.state.project.accounting */ + atom->ul = !!(fs->flags & FSF_QUOT_PROJ_ACC); + break; + case 1: /* quota.state.project.enforcement */ + atom->ul = !!(fs->flags & FSF_QUOT_PROJ_ENF); + break; + default: + return PM_ERR_PMID; + } + } + else 
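+ /* items 6..13 are per-project quota limits and usage, served from
+  * the QUOTA_PRJ_INDOM cache that refresh_filesys() populates */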
if (idp->item <= 13) { + struct project *pp; + sts = pmdaCacheLookup(INDOM(QUOTA_PRJ_INDOM), inst, NULL, + (void **)&pp); + if (sts < 0) + return sts; + if (sts != PMDA_CACHE_ACTIVE) + return PM_ERR_INST; + switch (idp->item) { + case 6: /* quota.project.space.hard */ + atom->ull = pp->space_hard >> 1; /* BBs to KB */ + break; + case 7: /* quota.project.space.soft */ + atom->ull = pp->space_soft >> 1; /* BBs to KB */ + break; + case 8: /* quota.project.space.used */ + atom->ull = pp->space_used >> 1; /* BBs to KB */ + break; + case 9: /* quota.project.space.time_left */ + atom->l = pp->space_time_left; + break; + case 10: /* quota.project.files.hard */ + atom->ull = pp->files_hard; + break; + case 11: /* quota.project.files.soft */ + atom->ull = pp->files_soft; + break; + case 12: /* quota.project.files.used */ + atom->ull = pp->files_used; + break; + case 13: /* quota.project.files.time_left */ + atom->l = pp->files_time_left; + break; + default: + return PM_ERR_PMID; + } + } + else + return PM_ERR_PMID; + break; + } + + return 1; +} + +static int +xfs_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda) +{ + int i, need_refresh[NUM_CLUSTERS] = { 0 }; + + for (i = 0; i < numpmid; i++) { + __pmID_int *idp = (__pmID_int *)&(pmidlist[i]); + if (idp->cluster >= MIN_CLUSTER && idp->cluster < NUM_CLUSTERS) + need_refresh[idp->cluster]++; + } + + xfs_refresh(pmda, need_refresh); + return pmdaFetch(numpmid, pmidlist, resp, pmda); +} + +static int +procfs_zero(const char *filename, pmValueSet *vsp) +{ + FILE *fp; + int value; + int sts = 0; + + value = vsp->vlist[0].value.lval; + if (value < 0) + return PM_ERR_SIGN; + + fp = xfs_statsfile(filename, "w"); + if (!fp) { + sts = PM_ERR_PERMISSION; + } else { + fprintf(fp, "%d\n", value); + fclose(fp); + } + return sts; +} + +static int +xfs_store(pmResult *result, pmdaExt *pmda) +{ + int i; + int sts = 0; + pmValueSet *vsp; + __pmID_int *pmidp; + + for (i = 0; i < result->numpmid && !sts; i++) { + vsp = result->vset[i]; + pmidp = (__pmID_int *)&vsp->pmid; + + if (pmidp->cluster == CLUSTER_XFS && pmidp->item == 79) { + if ((sts = procfs_zero("/proc/sys/fs/xfs/stats_clear", vsp)) < 0) + break; + } else { + sts = PM_ERR_PERMISSION; + break; + } + } + return sts; +} + +void +__PMDA_INIT_CALL +xfs_init(pmdaInterface *dp) +{ + char *envpath; + + if ((envpath = getenv("XFS_STATSPATH")) != NULL) + xfs_statspath = envpath; + + if (_isDSO) { + char helppath[MAXPATHLEN]; + int sep = __pmPathSeparator(); + snprintf(helppath, sizeof(helppath), "%s%c" "xfs" "%c" "help", + pmGetConfig("PCP_PMDAS_DIR"), sep, sep); + pmdaDSO(dp, PMDA_INTERFACE_3, "XFS DSO", helppath); + } + + if (dp->status != 0) + return; + + dp->version.any.fetch = xfs_fetch; + dp->version.any.store = xfs_store; + dp->version.any.instance = xfs_instance; + pmdaSetFetchCallBack(dp, xfs_fetchCallBack); + + xfs_indomtab[FILESYS_INDOM].it_indom = FILESYS_INDOM; + xfs_indomtab[QUOTA_PRJ_INDOM].it_indom = QUOTA_PRJ_INDOM; + + pmdaSetFlags(dp, PMDA_EXT_FLAG_HASHED); + pmdaInit(dp, xfs_indomtab, sizeof(xfs_indomtab)/sizeof(xfs_indomtab[0]), + xfs_metrictab, sizeof(xfs_metrictab)/sizeof(xfs_metrictab[0])); + pmdaCacheOp(INDOM(FILESYS_INDOM), PMDA_CACHE_CULL); + pmdaCacheOp(INDOM(QUOTA_PRJ_INDOM), PMDA_CACHE_CULL); +} + +pmLongOptions longopts[] = { + PMDA_OPTIONS_HEADER("Options"), + PMOPT_DEBUG, + PMDAOPT_DOMAIN, + PMDAOPT_LOGFILE, + PMOPT_HELP, + PMDA_OPTIONS_END +}; + +pmdaOptions opts = { + .short_options = "D:d:l:?", + .long_options = longopts, +}; + +int +main(int argc, char **argv) +{ + 
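+ /*
+  * Daemon start-up sequence: build the help file path, register
+  * with pmcd as domain XFS via pmdaDaemon(), parse the standard
+  * PMDA command line options, open the log file, set up the metric
+  * and instance domain tables in xfs_init(), then loop in
+  * pmdaMain() servicing fetch, instance and store requests.
+  */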
int sep = __pmPathSeparator(); + pmdaInterface dispatch; + char helppath[MAXPATHLEN]; + + _isDSO = 0; + __pmSetProgname(argv[0]); + snprintf(helppath, sizeof(helppath), "%s%c" "xfs" "%c" "help", + pmGetConfig("PCP_PMDAS_DIR"), sep, sep); + pmdaDaemon(&dispatch, PMDA_INTERFACE_3, pmProgname, XFS, "xfs.log", helppath); + + pmdaGetOptions(argc, argv, &opts, &dispatch); + if (opts.errors) { + pmdaUsageMessage(&opts); + exit(1); + } + + pmdaOpenLog(&dispatch); + xfs_init(&dispatch); + pmdaConnect(&dispatch); + pmdaMain(&dispatch); + exit(0); +} diff --git a/src/pmdas/linux_xfs/proc_fs_xfs.c b/src/pmdas/linux_xfs/proc_fs_xfs.c new file mode 100644 index 0000000..6ee83d4 --- /dev/null +++ b/src/pmdas/linux_xfs/proc_fs_xfs.c @@ -0,0 +1,278 @@ +/* + * Linux /proc/fs/xfs metrics cluster + * + * Copyright (c) 2014 Red Hat. + * Copyright (c) 2010 Aconex. All Rights Reserved. + * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "pmapi.h" +#include "proc_fs_xfs.h" + +int +refresh_proc_fs_xfs(proc_fs_xfs_t *proc_fs_xfs) +{ + char buf[4096]; + FILE *fp; + + memset(proc_fs_xfs, 0, sizeof(proc_fs_xfs_t)); + + if ((fp = xfs_statsfile("/proc/fs/xfs/stat", "r")) == NULL) + proc_fs_xfs->errcode = -oserror(); + else { + proc_fs_xfs->errcode = 0; + while (fgets(buf, sizeof(buf), fp) != NULL) { + if (strncmp(buf, "extent_alloc ", 13) == 0) + sscanf(buf, "extent_alloc %u %u %u %u", + &proc_fs_xfs->xs_allocx, + &proc_fs_xfs->xs_allocb, + &proc_fs_xfs->xs_freex, + &proc_fs_xfs->xs_freeb); + else + if (strncmp(buf, "abt ", 4) == 0) + sscanf(buf, "abt %u %u %u %u", + &proc_fs_xfs->xs_abt_lookup, + &proc_fs_xfs->xs_abt_compare, + &proc_fs_xfs->xs_abt_insrec, + &proc_fs_xfs->xs_abt_delrec); + else + if (strncmp(buf, "blk_map ", 8) == 0) + sscanf(buf, "blk_map %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_blk_mapr, + &proc_fs_xfs->xs_blk_mapw, + &proc_fs_xfs->xs_blk_unmap, + &proc_fs_xfs->xs_add_exlist, + &proc_fs_xfs->xs_del_exlist, + &proc_fs_xfs->xs_look_exlist, + &proc_fs_xfs->xs_cmp_exlist); + else + if (strncmp(buf, "bmbt ", 5) == 0) + sscanf(buf, "bmbt %u %u %u %u", + &proc_fs_xfs->xs_bmbt_lookup, + &proc_fs_xfs->xs_bmbt_compare, + &proc_fs_xfs->xs_bmbt_insrec, + &proc_fs_xfs->xs_bmbt_delrec); + else + if (strncmp(buf, "dir ", 4) == 0) + sscanf(buf, "dir %u %u %u %u", + &proc_fs_xfs->xs_dir_lookup, + &proc_fs_xfs->xs_dir_create, + &proc_fs_xfs->xs_dir_remove, + &proc_fs_xfs->xs_dir_getdents); + else + if (strncmp(buf, "trans ", 6) == 0) + sscanf(buf, "trans %u %u %u", + &proc_fs_xfs->xs_trans_sync, + &proc_fs_xfs->xs_trans_async, + &proc_fs_xfs->xs_trans_empty); + else + if (strncmp(buf, "ig ", 3) == 0) + sscanf(buf, "ig %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_ig_attempts, + &proc_fs_xfs->xs_ig_found, + &proc_fs_xfs->xs_ig_frecycle, + &proc_fs_xfs->xs_ig_missed, + &proc_fs_xfs->xs_ig_dup, + &proc_fs_xfs->xs_ig_reclaims, + &proc_fs_xfs->xs_ig_attrchg); + else + if (strncmp(buf, "log ", 4) == 0) { + sscanf(buf, "log %u %u %u %u %u", + &proc_fs_xfs->xs_log_writes, + 
&proc_fs_xfs->xs_log_blocks, + &proc_fs_xfs->xs_log_noiclogs, + &proc_fs_xfs->xs_log_force, + &proc_fs_xfs->xs_log_force_sleep); + } + else + if (strncmp(buf, "push_ail ", 9) == 0) + sscanf(buf, "push_ail %u %u %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_try_logspace, + &proc_fs_xfs->xs_sleep_logspace, + &proc_fs_xfs->xs_push_ail, + &proc_fs_xfs->xs_push_ail_success, + &proc_fs_xfs->xs_push_ail_pushbuf, + &proc_fs_xfs->xs_push_ail_pinned, + &proc_fs_xfs->xs_push_ail_locked, + &proc_fs_xfs->xs_push_ail_flushing, + &proc_fs_xfs->xs_push_ail_restarts, + &proc_fs_xfs->xs_push_ail_flush); + else + if (strncmp(buf, "xstrat ", 7) == 0) + sscanf(buf, "xstrat %u %u", + &proc_fs_xfs->xs_xstrat_quick, + &proc_fs_xfs->xs_xstrat_split); + else + if (strncmp(buf, "rw ", 3) == 0) + sscanf(buf, "rw %u %u", + &proc_fs_xfs->xs_write_calls, + &proc_fs_xfs->xs_read_calls); + else + if (strncmp(buf, "attr ", 5) == 0) + sscanf(buf, "attr %u %u %u %u", + &proc_fs_xfs->xs_attr_get, + &proc_fs_xfs->xs_attr_set, + &proc_fs_xfs->xs_attr_remove, + &proc_fs_xfs->xs_attr_list); + else + if (strncmp(buf, "qm ", 3) == 0) + sscanf(buf, "qm %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_qm_dqreclaims, + &proc_fs_xfs->xs_qm_dqreclaim_misses, + &proc_fs_xfs->xs_qm_dquot_dups, + &proc_fs_xfs->xs_qm_dqcachemisses, + &proc_fs_xfs->xs_qm_dqcachehits, + &proc_fs_xfs->xs_qm_dqwants, + &proc_fs_xfs->xs_qm_dqshake_reclaims, + &proc_fs_xfs->xs_qm_dqinact_reclaims); + else + if (strncmp(buf, "icluster ", 9) == 0) + sscanf(buf, "icluster %u %u %u", + &proc_fs_xfs->xs_iflush_count, + &proc_fs_xfs->xs_icluster_flushcnt, + &proc_fs_xfs->xs_icluster_flushinode); + else + if (strncmp(buf, "buf ", 4) == 0) { + sscanf(buf, "buf %u %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_buf_get, + &proc_fs_xfs->xs_buf_create, + &proc_fs_xfs->xs_buf_get_locked, + &proc_fs_xfs->xs_buf_get_locked_waited, + &proc_fs_xfs->xs_buf_busy_locked, + &proc_fs_xfs->xs_buf_miss_locked, + &proc_fs_xfs->xs_buf_page_retries, + &proc_fs_xfs->xs_buf_page_found, + &proc_fs_xfs->xs_buf_get_read); + } else + if (strncmp(buf, "vnodes ", 7) == 0) { + sscanf(buf, "vnodes %u %u %u %u %u %u %u %u", + &proc_fs_xfs->vnodes.vn_active, + &proc_fs_xfs->vnodes.vn_alloc, + &proc_fs_xfs->vnodes.vn_get, + &proc_fs_xfs->vnodes.vn_hold, + &proc_fs_xfs->vnodes.vn_rele, + &proc_fs_xfs->vnodes.vn_reclaim, + &proc_fs_xfs->vnodes.vn_remove, + &proc_fs_xfs->vnodes.vn_free); + } else + if (strncmp(buf, "abtb2 ", 6) == 0) { + sscanf(buf, "abtb2 %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_abtb_2_lookup, + &proc_fs_xfs->xs_abtb_2_compare, + &proc_fs_xfs->xs_abtb_2_insrec, + &proc_fs_xfs->xs_abtb_2_delrec, + &proc_fs_xfs->xs_abtb_2_newroot, + &proc_fs_xfs->xs_abtb_2_killroot, + &proc_fs_xfs->xs_abtb_2_increment, + &proc_fs_xfs->xs_abtb_2_decrement, + &proc_fs_xfs->xs_abtb_2_lshift, + &proc_fs_xfs->xs_abtb_2_rshift, + &proc_fs_xfs->xs_abtb_2_split, + &proc_fs_xfs->xs_abtb_2_join, + &proc_fs_xfs->xs_abtb_2_alloc, + &proc_fs_xfs->xs_abtb_2_free, + &proc_fs_xfs->xs_abtb_2_moves); + } else + if (strncmp(buf, "abtc2 ", 6) == 0) { + sscanf(buf, "abtc2 %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_abtc_2_lookup, + &proc_fs_xfs->xs_abtc_2_compare, + &proc_fs_xfs->xs_abtc_2_insrec, + &proc_fs_xfs->xs_abtc_2_delrec, + &proc_fs_xfs->xs_abtc_2_newroot, + &proc_fs_xfs->xs_abtc_2_killroot, + &proc_fs_xfs->xs_abtc_2_increment, + &proc_fs_xfs->xs_abtc_2_decrement, + &proc_fs_xfs->xs_abtc_2_lshift, + &proc_fs_xfs->xs_abtc_2_rshift, + &proc_fs_xfs->xs_abtc_2_split, + 
&proc_fs_xfs->xs_abtc_2_join, + &proc_fs_xfs->xs_abtc_2_alloc, + &proc_fs_xfs->xs_abtc_2_free, + &proc_fs_xfs->xs_abtc_2_moves); + } else + if (strncmp(buf, "bmbt2 ", 6) == 0) { + sscanf(buf, "bmbt2 %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_bmbt_2_lookup, + &proc_fs_xfs->xs_bmbt_2_compare, + &proc_fs_xfs->xs_bmbt_2_insrec, + &proc_fs_xfs->xs_bmbt_2_delrec, + &proc_fs_xfs->xs_bmbt_2_newroot, + &proc_fs_xfs->xs_bmbt_2_killroot, + &proc_fs_xfs->xs_bmbt_2_increment, + &proc_fs_xfs->xs_bmbt_2_decrement, + &proc_fs_xfs->xs_bmbt_2_lshift, + &proc_fs_xfs->xs_bmbt_2_rshift, + &proc_fs_xfs->xs_bmbt_2_split, + &proc_fs_xfs->xs_bmbt_2_join, + &proc_fs_xfs->xs_bmbt_2_alloc, + &proc_fs_xfs->xs_bmbt_2_free, + &proc_fs_xfs->xs_bmbt_2_moves); + } else + if (strncmp(buf, "ibt2 ", 5) == 0) { + sscanf(buf, "ibt2 %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_ibt_2_lookup, + &proc_fs_xfs->xs_ibt_2_compare, + &proc_fs_xfs->xs_ibt_2_insrec, + &proc_fs_xfs->xs_ibt_2_delrec, + &proc_fs_xfs->xs_ibt_2_newroot, + &proc_fs_xfs->xs_ibt_2_killroot, + &proc_fs_xfs->xs_ibt_2_increment, + &proc_fs_xfs->xs_ibt_2_decrement, + &proc_fs_xfs->xs_ibt_2_lshift, + &proc_fs_xfs->xs_ibt_2_rshift, + &proc_fs_xfs->xs_ibt_2_split, + &proc_fs_xfs->xs_ibt_2_join, + &proc_fs_xfs->xs_ibt_2_alloc, + &proc_fs_xfs->xs_ibt_2_free, + &proc_fs_xfs->xs_ibt_2_moves); + } else + if (strncmp(buf, "xpc", 3) == 0) + sscanf(buf, "xpc %llu %llu %llu", + (unsigned long long *)&proc_fs_xfs->xpc.xs_xstrat_bytes, + (unsigned long long *)&proc_fs_xfs->xpc.xs_write_bytes, + (unsigned long long *)&proc_fs_xfs->xpc.xs_read_bytes); + } + fclose(fp); + + if (proc_fs_xfs->xs_log_writes) + proc_fs_xfs->xs_log_write_ratio = + proc_fs_xfs->xs_log_blocks / proc_fs_xfs->xs_log_writes; + /* + * Bug #824382. xs_log_blocks is counted in units + * of 512 bytes/block, but PCP exports it as Kbytes. + */ + proc_fs_xfs->xs_log_blocks >>= 1; + + fp = xfs_statsfile("/proc/fs/xfs/xqmstat", "r"); + if (fp != (FILE *)NULL) { + if (fgets(buf, sizeof(buf), fp) != NULL) { + if (strncmp(buf, "qm", 2) == 0) + sscanf(buf, "qm %u %u %u %u %u %u %u %u", + &proc_fs_xfs->xs_qm_dqreclaims, + &proc_fs_xfs->xs_qm_dqreclaim_misses, + &proc_fs_xfs->xs_qm_dquot_dups, + &proc_fs_xfs->xs_qm_dqcachemisses, + &proc_fs_xfs->xs_qm_dqcachehits, + &proc_fs_xfs->xs_qm_dqwants, + &proc_fs_xfs->xs_qm_dqshake_reclaims, + &proc_fs_xfs->xs_qm_dqinact_reclaims); + } + fclose(fp); + } + } + + if (proc_fs_xfs->errcode == 0) + return 0; + return -1; +} diff --git a/src/pmdas/linux_xfs/proc_fs_xfs.h b/src/pmdas/linux_xfs/proc_fs_xfs.h new file mode 100644 index 0000000..bec0514 --- /dev/null +++ b/src/pmdas/linux_xfs/proc_fs_xfs.h @@ -0,0 +1,189 @@ +/* + * Linux /proc/fs/xfs metrics cluster + * + * Copyright (c) 2014 Red Hat. + * Copyright (c) 2010 Aconex. All Rights Reserved. + * Copyright (c) 2000,2004 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +typedef struct { + int errcode; /* error from previous refresh */ + unsigned int xs_allocx; /* allocs.alloc_extent */ + unsigned int xs_allocb; /* allocs.alloc_block */ + unsigned int xs_freex; /* allocs.free_extent */ + unsigned int xs_freeb; /* allocs.free_block */ + + unsigned int xs_abt_lookup; /* alloc_btree.lookup */ + unsigned int xs_abt_compare; /* alloc_btree.compare */ + unsigned int xs_abt_insrec; /* alloc_btree.insrec */ + unsigned int xs_abt_delrec; /* alloc_btree.delrec */ + unsigned int xs_blk_mapr; /* block_map.read_ops */ + unsigned int xs_blk_mapw; /* block_map.write_ops */ + unsigned int xs_blk_unmap; /* block_map.unmap */ + unsigned int xs_add_exlist; /* block_map.add_exlist */ + unsigned int xs_del_exlist; /* block_map.del_exlist */ + unsigned int xs_look_exlist; /* block_map.look_exlist */ + unsigned int xs_cmp_exlist; /* block_map.cmp_exlist */ + unsigned int xs_bmbt_lookup; /* bmap_btree.lookup */ + unsigned int xs_bmbt_compare; /* bmap_btree.compare */ + unsigned int xs_bmbt_insrec; /* bmap_btree.insrec */ + unsigned int xs_bmbt_delrec; /* bmap_btree.delrec */ + + unsigned int xs_dir_lookup; /* dir_ops.lookup */ + unsigned int xs_dir_create; /* dir_ops.create */ + unsigned int xs_dir_remove; /* dir_ops.remove */ + unsigned int xs_dir_getdents; /* dir_ops.getdents */ + + unsigned int xs_trans_sync; /* transactions.sync */ + unsigned int xs_trans_async; /* transactions.async */ + unsigned int xs_trans_empty; /* transactions.empty */ + + unsigned int xs_ig_attempts; /* inode_ops.ig_attempts */ + unsigned int xs_ig_found; /* inode_ops.ig_found */ + unsigned int xs_ig_frecycle; /* inode_ops.ig_frecycle */ + unsigned int xs_ig_missed; /* inode_ops.ig_missed */ + unsigned int xs_ig_dup; /* inode_ops.ig_dup */ + unsigned int xs_ig_reclaims; /* inode_ops.ig_reclaims */ + unsigned int xs_ig_attrchg; /* inode_ops.ig_attrchg */ + + unsigned int xs_log_writes; /* log.writes */ + unsigned int xs_log_blocks; /* log.blocks */ + float xs_log_write_ratio; /* log.write_ratio */ + unsigned int xs_log_noiclogs; /* log.noiclogs */ + + unsigned int xs_xstrat_quick; /* xstrat.quick */ + unsigned int xs_xstrat_split; /* xstrat.split */ + unsigned int xs_write_calls; /* write */ + unsigned int xs_read_calls; /* read */ + + unsigned int xs_attr_get; /* attr.get */ + unsigned int xs_attr_set; /* attr.set */ + unsigned int xs_attr_remove; /* attr.remove */ + unsigned int xs_attr_list; /* attr.list */ + + unsigned int xs_log_force; /* log.force */ + unsigned int xs_log_force_sleep; /* log.force_sleep */ + unsigned int xs_try_logspace; /* log_tail.try_logspace */ + unsigned int xs_sleep_logspace; /* log_tail.sleep_logspace */ + unsigned int xs_push_ail; /* log_tail.push_ail.pushes */ + unsigned int xs_push_ail_success; /* log_tail.push_ail.success */ + unsigned int xs_push_ail_pushbuf; /* log_tail.push_ail.pushbuf */ + unsigned int xs_push_ail_pinned; /* log_tail.push_ail.pinned */ + unsigned int xs_push_ail_locked; /* log_tail.push_ail.locked */ + unsigned int xs_push_ail_flushing; /* log_tail.push_ail.flushing */ + unsigned int xs_push_ail_restarts; /* log_tail.push_ail.restarts */ + unsigned int xs_push_ail_flush; /* log_tail.push_ail.flush */ + + unsigned int xs_qm_dqreclaims; /* quota.reclaims */ + unsigned int xs_qm_dqreclaim_misses; /* quota.reclaim_misses */ + unsigned int xs_qm_dquot_dups; /* quota.dquot_dups */ + unsigned int xs_qm_dqcachemisses; /* quota.cachemisses */ + unsigned int xs_qm_dqcachehits; /* quota.cachehits */ + unsigned int xs_qm_dqwants; /* quota.wants */ + 
unsigned int xs_qm_dqshake_reclaims; /* quota.shake_reclaims */ + unsigned int xs_qm_dqinact_reclaims; /* quota.inact_reclaims */ + + unsigned int xs_iflush_count; /* iflush_count */ + unsigned int xs_icluster_flushcnt; /* icluster_flushcnt */ + unsigned int xs_icluster_flushinode; /* icluster_flushinode */ + + unsigned int xs_buf_get; /* buffer.get */ + unsigned int xs_buf_create; /* buffer.create */ + unsigned int xs_buf_get_locked; /* buffer.get_locked */ + unsigned int xs_buf_get_locked_waited; /* buffer.get_locked_waited */ + unsigned int xs_buf_busy_locked; /* buffer.busy_locked */ + unsigned int xs_buf_miss_locked; /* buffer.miss_locked */ + unsigned int xs_buf_page_retries; /* buffer.page_retries */ + unsigned int xs_buf_page_found; /* buffer.page_found */ + unsigned int xs_buf_get_read; /* buffer.get_read */ + + unsigned int xs_abtb_2_lookup; /* btree.alloc_blocks.lookup */ + unsigned int xs_abtb_2_compare; /* btree.alloc_blocks.compare */ + unsigned int xs_abtb_2_insrec; /* btree.alloc_blocks.insrec */ + unsigned int xs_abtb_2_delrec; /* btree.alloc_blocks.delrec */ + unsigned int xs_abtb_2_newroot; /* btree.alloc_blocks.newroot */ + unsigned int xs_abtb_2_killroot; /* btree.alloc_blocks.killroot */ + unsigned int xs_abtb_2_increment; /* btree.alloc_blocks.increment */ + unsigned int xs_abtb_2_decrement; /* btree.alloc_blocks.decrement */ + unsigned int xs_abtb_2_lshift; /* btree.alloc_blocks.lshift */ + unsigned int xs_abtb_2_rshift; /* btree.alloc_blocks.rshift */ + unsigned int xs_abtb_2_split; /* btree.alloc_blocks.split */ + unsigned int xs_abtb_2_join; /* btree.alloc_blocks.join */ + unsigned int xs_abtb_2_alloc; /* btree.alloc_blocks.alloc */ + unsigned int xs_abtb_2_free; /* btree.alloc_blocks.free */ + unsigned int xs_abtb_2_moves; /* btree.alloc_blocks.moves */ + unsigned int xs_abtc_2_lookup; /* btree.alloc_contig.lookup */ + unsigned int xs_abtc_2_compare; /* btree.alloc_contig.compare */ + unsigned int xs_abtc_2_insrec; /* btree.alloc_contig.insrec */ + unsigned int xs_abtc_2_delrec; /* btree.alloc_contig.delrec */ + unsigned int xs_abtc_2_newroot; /* btree.alloc_contig.newroot */ + unsigned int xs_abtc_2_killroot; /* btree.alloc_contig.killroot */ + unsigned int xs_abtc_2_increment; /* btree.alloc_contig.increment */ + unsigned int xs_abtc_2_decrement; /* btree.alloc_contig.decrement */ + unsigned int xs_abtc_2_lshift; /* btree.alloc_contig.lshift */ + unsigned int xs_abtc_2_rshift; /* btree.alloc_contig.rshift */ + unsigned int xs_abtc_2_split; /* btree.alloc_contig.split */ + unsigned int xs_abtc_2_join; /* btree.alloc_contig.join */ + unsigned int xs_abtc_2_alloc; /* btree.alloc_contig.alloc */ + unsigned int xs_abtc_2_free; /* btree.alloc_contig.free */ + unsigned int xs_abtc_2_moves; /* btree.alloc_contig.moves */ + unsigned int xs_bmbt_2_lookup; /* btree.block_map.lookup */ + unsigned int xs_bmbt_2_compare; /* btree.block_map.compare */ + unsigned int xs_bmbt_2_insrec; /* btree.block_map.insrec */ + unsigned int xs_bmbt_2_delrec; /* btree.block_map.delrec */ + unsigned int xs_bmbt_2_newroot; /* btree.block_map.newroot */ + unsigned int xs_bmbt_2_killroot; /* btree.block_map.killroot */ + unsigned int xs_bmbt_2_increment; /* btree.block_map.increment */ + unsigned int xs_bmbt_2_decrement; /* btree.block_map.decrement */ + unsigned int xs_bmbt_2_lshift; /* btree.block_map.lshift */ + unsigned int xs_bmbt_2_rshift; /* btree.block_map.rshift */ + unsigned int xs_bmbt_2_split; /* btree.block_map.split */ + unsigned int xs_bmbt_2_join; /* btree.block_map.join */ + 
unsigned int xs_bmbt_2_alloc; /* btree.block_map.alloc */ + unsigned int xs_bmbt_2_free; /* btree.block_map.free */ + unsigned int xs_bmbt_2_moves; /* btree.block_map.moves */ + unsigned int xs_ibt_2_lookup; /* btree.inode.lookup */ + unsigned int xs_ibt_2_compare; /* btree.inode.compare */ + unsigned int xs_ibt_2_insrec; /* btree.inode.insrec */ + unsigned int xs_ibt_2_delrec; /* btree.inode.delrec */ + unsigned int xs_ibt_2_newroot; /* btree.inode.newroot */ + unsigned int xs_ibt_2_killroot; /* btree.inode.killroot */ + unsigned int xs_ibt_2_increment; /* btree.inode.increment */ + unsigned int xs_ibt_2_decrement; /* btree.inode.decrement */ + unsigned int xs_ibt_2_lshift; /* btree.inode.lshift */ + unsigned int xs_ibt_2_rshift; /* btree.inode.rshift */ + unsigned int xs_ibt_2_split; /* btree.inode.split */ + unsigned int xs_ibt_2_join; /* btree.inode.join */ + unsigned int xs_ibt_2_alloc; /* btree.inode.alloc */ + unsigned int xs_ibt_2_free; /* btree.inode.free */ + unsigned int xs_ibt_2_moves; /* btree.inode.moves */ + + struct vnodes { + unsigned int vn_active; /* vnodes.active */ + unsigned int vn_alloc; /* vnodes.alloc */ + unsigned int vn_get; /* vnodes.get */ + unsigned int vn_hold; /* vnodes.hold */ + unsigned int vn_rele; /* vnodes.rele */ + unsigned int vn_reclaim; /* vnodes.reclaim */ + unsigned int vn_remove; /* vnodes.remove */ + unsigned int vn_free; /* vnodes.free */ + } vnodes; + struct xpc { + __uint64_t xs_write_bytes; /* write_bytes */ + __uint64_t xs_read_bytes; /* read_bytes */ + __uint64_t xs_xstrat_bytes; /* xstrat_bytes */ + } xpc; +} proc_fs_xfs_t; + +extern FILE *xfs_statsfile(const char *, const char *); +extern int refresh_proc_fs_xfs(proc_fs_xfs_t *); diff --git a/src/pmdas/linux_xfs/root b/src/pmdas/linux_xfs/root new file mode 100644 index 0000000..5f26a89 --- /dev/null +++ b/src/pmdas/linux_xfs/root @@ -0,0 +1,6 @@ +/* + * fake "root" for validating the local PMNS subtree + */ + +#include <stdpmid> +#include "root_xfs" diff --git a/src/pmdas/linux_xfs/root_xfs b/src/pmdas/linux_xfs/root_xfs new file mode 100644 index 0000000..16ebfd5 --- /dev/null +++ b/src/pmdas/linux_xfs/root_xfs @@ -0,0 +1,295 @@ +/* + * Portions Copyright (c) 2013 Red Hat. + * Copyright (c) 2000,2004,2007-2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#ifndef XFS +#define XFS 11 +#endif + +root { + xfs + quota +} + +xfs { + allocs + alloc_btree + block_map + bmap_btree + dir_ops + transactions + inode_ops + log + log_tail + xstrat + write XFS:16:51 + write_bytes XFS:16:52 + read XFS:16:53 + read_bytes XFS:16:54 + attr + quota + iflush_count XFS:16:67 + icluster_flushcnt XFS:16:68 + icluster_flushinode XFS:16:69 + buffer + vnodes + control + btree +} + +xfs.allocs { + alloc_extent XFS:16:0 + alloc_block XFS:16:1 + free_extent XFS:16:2 + free_block XFS:16:3 +} + + +xfs.alloc_btree { + lookup XFS:16:4 + compare XFS:16:5 + insrec XFS:16:6 + delrec XFS:16:7 +} + +xfs.block_map { + read_ops XFS:16:8 + write_ops XFS:16:9 + unmap XFS:16:10 + add_exlist XFS:16:11 + del_exlist XFS:16:12 + look_exlist XFS:16:13 + cmp_exlist XFS:16:14 +} + +xfs.bmap_btree { + lookup XFS:16:15 + compare XFS:16:16 + insrec XFS:16:17 + delrec XFS:16:18 +} + +xfs.dir_ops { + lookup XFS:16:19 + create XFS:16:20 + remove XFS:16:21 + getdents XFS:16:22 +} + +xfs.transactions { + sync XFS:16:23 + async XFS:16:24 + empty XFS:16:25 +} + +xfs.inode_ops { + ig_attempts XFS:16:26 + ig_found XFS:16:27 + ig_frecycle XFS:16:28 + ig_missed XFS:16:29 + ig_dup XFS:16:30 + ig_reclaims XFS:16:31 + ig_attrchg XFS:16:32 +} + +xfs.log { + writes XFS:16:33 + blocks XFS:16:34 + write_ratio XFS:16:78 + noiclogs XFS:16:35 + force XFS:16:36 + force_sleep XFS:16:37 +} + +xfs.log_tail { + try_logspace XFS:16:38 + sleep_logspace XFS:16:39 + push_ail +} + +xfs.log_tail.push_ail { + pushes XFS:16:40 + success XFS:16:41 + pushbuf XFS:16:42 + pinned XFS:16:43 + locked XFS:16:44 + flushing XFS:16:45 + restarts XFS:16:46 + flush XFS:16:47 +} + +xfs.xstrat { + bytes XFS:16:48 + quick XFS:16:49 + split XFS:16:50 +} + +xfs.attr { + get XFS:16:55 + set XFS:16:56 + remove XFS:16:57 + list XFS:16:58 +} + +xfs.quota { + reclaims XFS:16:59 + reclaim_misses XFS:16:60 + dquot_dups XFS:16:61 + cachemisses XFS:16:62 + cachehits XFS:16:63 + wants XFS:16:64 + shake_reclaims XFS:16:65 + inact_reclaims XFS:16:66 +} + +xfs.vnodes { + active XFS:16:70 + alloc XFS:16:71 + get XFS:16:72 + hold XFS:16:73 + rele XFS:16:74 + reclaim XFS:16:75 + remove XFS:16:76 + free XFS:16:77 +} + +xfs.control { + reset XFS:16:79 +} + +xfs.buffer { + get XFS:17:0 + create XFS:17:1 + get_locked XFS:17:2 + get_locked_waited XFS:17:3 + busy_locked XFS:17:4 + miss_locked XFS:17:5 + page_retries XFS:17:6 + page_found XFS:17:7 + get_read XFS:17:8 +} + +xfs.btree { + alloc_blocks + alloc_contig + block_map + inode +} + +xfs.btree.alloc_blocks { + lookup XFS:16:80 + compare XFS:16:81 + insrec XFS:16:82 + delrec XFS:16:83 + newroot XFS:16:84 + killroot XFS:16:85 + increment XFS:16:86 + decrement XFS:16:87 + lshift XFS:16:88 + rshift XFS:16:89 + split XFS:16:90 + join XFS:16:91 + alloc XFS:16:92 + free XFS:16:93 + moves XFS:16:94 +} + +xfs.btree.alloc_contig { + lookup XFS:16:95 + compare XFS:16:96 + insrec XFS:16:97 + delrec XFS:16:98 + newroot XFS:16:99 + killroot XFS:16:100 + increment XFS:16:101 + decrement XFS:16:102 + lshift XFS:16:103 + rshift XFS:16:104 + split XFS:16:105 + join XFS:16:106 + alloc XFS:16:107 + free XFS:16:108 + moves XFS:16:109 +} + +xfs.btree.block_map { + lookup XFS:16:110 + compare XFS:16:111 + insrec XFS:16:112 + delrec XFS:16:113 + newroot XFS:16:114 + killroot XFS:16:115 + increment XFS:16:116 + decrement XFS:16:117 + lshift XFS:16:118 + rshift XFS:16:119 + split XFS:16:120 + join XFS:16:121 + alloc XFS:16:122 + free XFS:16:123 + moves XFS:16:124 +} + +xfs.btree.inode { + lookup XFS:16:125 + compare XFS:16:126 
+ insrec XFS:16:127 + delrec XFS:16:128 + newroot XFS:16:129 + killroot XFS:16:130 + increment XFS:16:131 + decrement XFS:16:132 + lshift XFS:16:133 + rshift XFS:16:134 + split XFS:16:135 + join XFS:16:136 + alloc XFS:16:137 + free XFS:16:138 + moves XFS:16:139 +} + +quota { + state + project +} + +quota.state { + project +} + +quota.state.project { + accounting XFS:30:0 + enforcement XFS:30:1 +} + +quota.project { + space + files +} + +quota.project.space { + hard XFS:30:6 + soft XFS:30:7 + used XFS:30:8 + time_left XFS:30:9 +} + +quota.project.files { + hard XFS:30:10 + soft XFS:30:11 + used XFS:30:12 + time_left XFS:30:13 +} + +#undef XFS
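The stat-file scanning convention used in refresh_proc_fs_xfs() is easy to exercise in isolation. The standalone sketch below is illustrative only (it is not part of the packaged sources) and assumes a kernel with XFS statistics enabled at the default /proc/fs/xfs/stat path; it scans just the "xpc" line and prints the three byte counters behind xfs.xstrat.bytes, xfs.write_bytes and xfs.read_bytes.

    /*
     * xpc_demo.c -- hypothetical, minimal reader for the "xpc" line of
     * /proc/fs/xfs/stat, mirroring the sscanf pattern in proc_fs_xfs.c
     */
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char buf[4096];
        unsigned long long xstrat_bytes = 0, write_bytes = 0, read_bytes = 0;
        FILE *fp = fopen("/proc/fs/xfs/stat", "r");

        if (fp == NULL) {
            perror("/proc/fs/xfs/stat");
            return 1;
        }
        while (fgets(buf, sizeof(buf), fp) != NULL) {
            /* same prefix test the PMDA uses for this record */
            if (strncmp(buf, "xpc", 3) == 0)
                sscanf(buf, "xpc %llu %llu %llu",
                       &xstrat_bytes, &write_bytes, &read_bytes);
        }
        fclose(fp);
        printf("xstrat=%llu write=%llu read=%llu (bytes)\n",
               xstrat_bytes, write_bytes, read_bytes);
        return 0;
    }

On a host where pmcd and this PMDA are installed, the same counters should be retrievable through the metric names as well, e.g. pminfo -f xfs.write_bytes.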