Here is a new patch set.  I missed a case in cache_nlookup().  The other
patch will work but might still result in unnecessary vnlru process
wakeups.

The hysteresis is designed such that in a heavily loaded system only
1 out of 10 calls to the namecache will eat cpu cycles cleaning up
negative cache entries.

					-Matt

Index: sys/namecache.h
===================================================================
RCS file: /cvs/src/sys/sys/namecache.h,v
retrieving revision 1.13
diff -u -r1.13 namecache.h
--- sys/namecache.h	12 Oct 2004 19:20:48 -0000	1.13
+++ sys/namecache.h	18 Oct 2004 20:35:08 -0000
@@ -164,6 +164,7 @@
 void	cache_purge(struct vnode *vp);
 void	cache_purgevfs (struct mount *mp);
 int	cache_get_nonblock(struct namecache *ncp);
+void	cache_cleanneg(int count);
 struct namecache *cache_get(struct namecache *ncp);
 struct namecache *cache_hold(struct namecache *ncp);
 void	cache_put(struct namecache *ncp);
Index: kern/vfs_cache.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_cache.c,v
retrieving revision 1.38
diff -u -r1.38 vfs_cache.c
--- kern/vfs_cache.c	12 Oct 2004 19:20:46 -0000	1.38
+++ kern/vfs_cache.c	18 Oct 2004 21:58:16 -0000
@@ -777,6 +777,38 @@
 	--ncp->nc_refs;
 }
 
+static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;
+
+static __inline
+void
+cache_hysteresis(void)
+{
+	/*
+	 * Don't cache too many negative hits.  We use hysteresis to reduce
+	 * the impact on the critical path.
+	 */
+	switch(cache_hysteresis_state) {
+	case CHI_LOW:
+		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
+			cache_cleanneg(10);
+			if (cache_hysteresis_state == CHI_LOW)
+				printf("H");
+			cache_hysteresis_state = CHI_HIGH;
+		}
+		break;
+	case CHI_HIGH:
+		if (numneg > MINNEG * 9 / 10 &&
+		    numneg * ncnegfactor * 9 / 10 > numcache
+		) {
+			cache_cleanneg(10);
+		} else {
+			cache_hysteresis_state = CHI_LOW;
+			printf("L");
+		}
+		break;
+	}
+}
+
 /*
  * NEW NAMECACHE LOOKUP API
  *
@@ -883,6 +915,7 @@
 	ncp->nc_flag |= NCF_HASHED;
 	cache_link_parent(ncp, par);
 found:
+	cache_hysteresis();
 	return(ncp);
 }
 
@@ -1388,15 +1421,35 @@
 		ncp->nc_flag |= NCF_WHITEOUT;
 	}
 	cache_put(ncp);
+	cache_hysteresis();
+}
+
+void
+cache_cleanneg(int count)
+{
+	struct namecache *ncp;
+
+	/*
+	 * Automode from the vnlru proc - clean out 10% of the negative cache
+	 * entries.
+	 */
+	if (count == 0) {
+		count = numneg / 10 + 1;
+		printf("X");
+	}
 
 	/*
-	 * Don't cache too many negative hits
+	 * Attempt to clean out the specified number of negative cache
+	 * entries.
 	 */
-	if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
+	while (count) {
 		ncp = TAILQ_FIRST(&ncneglist);
 		KKASSERT(ncp != NULL);
+		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
+		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
 		if (cache_get_nonblock(ncp) == 0)
 			cache_zap(ncp);
+		--count;
 	}
 }
 
Index: kern/vfs_mount.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_mount.c,v
retrieving revision 1.1
diff -u -r1.1 vfs_mount.c
--- kern/vfs_mount.c	12 Oct 2004 19:20:46 -0000	1.1
+++ kern/vfs_mount.c	18 Oct 2004 20:34:52 -0000
@@ -470,6 +470,7 @@
 			continue;
 		}
 		done = 0;
+		cache_cleanneg(0);
 		lwkt_gettoken(&ilock, &mountlist_token);
 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 			if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
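
For anyone who wants to poke at the hysteresis logic outside the kernel,
below is a minimal stand-alone C sketch of the same two-state gate.  It
is not part of the patch: MINNEG, negfactor, numneg, numcache and
clean_some() here are user-space stand-ins for the kernel's MINNEG,
ncnegfactor, numneg, numcache and cache_cleanneg(), and the lookup loop
in main() exists only to make the state transitions visible.

/*
 * Stand-alone sketch of the two-state hysteresis gate from the patch.
 * NOT kernel code: the counters and clean_some() are hypothetical
 * stand-ins for the real namecache globals and cache_cleanneg().
 */
#include <stdio.h>

#define MINNEG		1024	/* assumed floor of negative entries */
static int negfactor = 16;	/* assumed ratio limit (neg * factor > total) */

static int numneg;		/* simulated negative-entry count */
static int numcache;		/* simulated total cache size */

static enum { CHI_LOW, CHI_HIGH } state = CHI_LOW;

/*
 * Pretend to clean 'count' negative entries.
 */
static void
clean_some(int count)
{
	if (count > numneg)
		count = numneg;
	numneg -= count;
}

/*
 * Mirror of the patch's cache_hysteresis(): when the limit is crossed,
 * clean a small batch and go HIGH; stay HIGH and keep cleaning each call
 * only while the count remains well over the limit, otherwise drop back
 * to LOW, so steady traffic pays the cleanup cost on roughly 1 call in 10.
 */
static void
hysteresis(void)
{
	switch (state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * negfactor > numcache) {
			clean_some(10);
			state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * negfactor * 9 / 10 > numcache) {
			clean_some(10);
		} else {
			state = CHI_LOW;
		}
		break;
	}
}

int
main(void)
{
	int i;

	numcache = 100000;
	numneg = 0;

	/* Simulate lookups that each add one negative entry. */
	for (i = 0; i < 20000; i++) {
		++numneg;
		hysteresis();
	}
	printf("final numneg=%d state=%s\n", numneg,
	    state == CHI_HIGH ? "HIGH" : "LOW");
	return 0;
}

With these made-up numbers the count hovers around numcache / negfactor
(6250 here) and only about one simulated lookup in ten triggers a
cleanup, which is the 1-in-10 behaviour described in the mail above.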