summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
authorvb70745 <none@none>2007-03-26 10:41:59 -0700
committervb70745 <none@none>2007-03-26 10:41:59 -0700
commit1e7ef456443516ebe1cb6722f4133b6f1f165284 (patch)
tree40dcf97e0526e8546350a0dc6032f0232f4bc7ab /usr/src
parent40e5e17b3361b3eea56a9723071c406894a20b78 (diff)
downloadillumos-gate-1e7ef456443516ebe1cb6722f4133b6f1f165284.tar.gz
6509040 hang in sfmmu_hblk_steal()
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/uts/sfmmu/vm/hat_sfmmu.c23
1 file changed, 17 insertions, 6 deletions
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index fb4acb30ae..a01e6d4122 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -9553,8 +9553,9 @@ sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
mutex_enter(&freehblkp_lock);
if (freehblkp != NULL) {
/*
- * If the current thread is owning hblk_reserve,
- * let it succede even if freehblkcnt is really low.
+ * If the current thread is owning hblk_reserve OR
+ * critical request from sfmmu_hblk_steal()
+ * let it succeed even if freehblkcnt is really low.
*/
if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
SFMMU_STAT(sf_get_free_throttle);
@@ -10260,15 +10261,17 @@ sfmmu_hblks_list_purge(struct hme_blk **listp)
}
#define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
+#define SFMMU_HBLK_STEAL_THRESHOLD 5
static uint_t sfmmu_hblk_steal_twice;
static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
/*
- * Steal a hmeblk
- * Enough hmeblks were allocated at startup (nucleus hmeblks) and also
- * hmeblks were added dynamically. We should never ever not be able to
- * find one. Look for an unused/unlocked hmeblk in user hash table.
+ * Steal a hmeblk from user or kernel hme hash lists.
+ * For 8K tte grab one from reserve pool (freehblkp) before proceeding to
+ * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts
+ * tap into critical reserve of freehblkp.
+ * Note: We remain looping in this routine until we find one.
*/
static struct hme_blk *
sfmmu_hblk_steal(int size)
@@ -10278,8 +10281,16 @@ sfmmu_hblk_steal(int size)
struct hme_blk *hmeblkp = NULL, *pr_hblk;
uint64_t hblkpa, prevpa;
int i;
+ uint_t loop_cnt = 0, critical;
for (;;) {
+ if (size == TTE8K) {
+ critical =
+ (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
+ if (sfmmu_get_free_hblk(&hmeblkp, critical))
+ return (hmeblkp);
+ }
+
hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
uhmehash_steal_hand;
ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);