#define VSTATE_VALID(state) \
@@ -280,8 +307,7 @@ vstate_assert_wait_stable(vnode_t *vp, c
vnpanic(vp, "state is %s at %s:%d",
vstate_name(vip->vi_state), func, line);
if (! VSTATE_VALID(vip->vi_state))
vnpanic(vp, "state is %s at %s:%d",
@@ -312,11 +338,7 @@ vstate_assert_change(vnode_t *vp, enum v
vstate_name(from), vstate_name(to), vp->v_usecount,
func, line);
- vip->vi_state = to;
- if (from == VS_LOADING)
- cv_broadcast(&vcache_cv);
- if (to == VS_LOADED || to == VS_RECLAIMED)
- cv_broadcast(&vp->v_cv);
+ vstate_change(vp, from, to);
}
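The consolidated vstate_change() helper is defined outside this excerpt; reconstructed from the lines removed above, it presumably reads roughly like this sketch (not quoted from the patch):

	static void
	vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
	{
		vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

		/* Commit the state transition. */
		vip->vi_state = to;

		/* Wake threads waiting for a vnode to leave VS_LOADING. */
		if (from == VS_LOADING)
			cv_broadcast(&vcache_cv);

		/* Wake threads waiting for the vnode to reach a stable state. */
		if (to == VS_LOADED || to == VS_RECLAIMED)
			cv_broadcast(&vp->v_cv);
	}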
/*
+ * Asynchronous vnode release; the vnode is released in a different context.
+ */
+void
+vrele_async_cleaner(vnode_t *vp)
+{
+
+ mutex_enter(vp->v_interlock);
+
+ /*
+ * If this vnode has a cleaner ref, release that.
+ */
+ if (vcache_rele_cleaner(vp)) {
+ mutex_exit(vp->v_interlock);
+ return;
+ }
+
+ /*
+ * Otherwise do a normal async rele.
+ */
+ vrelel(vp, VRELEL_ASYNC_RELE);
+}
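Intended usage from a file system cleaner, as a hypothetical sketch: example_clean_vnode() and the ino_t key are invented for illustration and assume an LFS-style cleaner that looks vnodes up by inode number.

	static int
	example_clean_vnode(struct mount *mp, ino_t ino)
	{
		struct vnode *vp;
		int error;

		/*
		 * Unlike vcache_get(), this may return a vnode that is in
		 * VS_CLEANING with VI_CLEANERREF set, instead of waiting
		 * for vcache_reclaim() to finish.
		 */
		error = vcache_get_cleaner(mp, &ino, sizeof(ino), &vp);
		if (error != 0)
			return error;

		/* ... relocate this vnode's blocks ... */

		/*
		 * Drops the cleaner reference if one was taken, otherwise
		 * does a normal asynchronous release.
		 */
		vrele_async_cleaner(vp);
		return 0;
	}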
+
+/*
* Vnode reference, where a reference is already held by some other
* object (for example, a file structure).
*/
@@ -1241,12 +1264,38 @@ vcache_vget(vnode_t *vp)
return 0;
}
+static void
+vcache_ref_cleaner(vnode_t *vp)
+{
+
+ KASSERT(mutex_owned(vp->v_interlock));
+ KASSERT((vp->v_mount->mnt_iflag & IMNT_CLEANING) != 0);
+ KASSERTMSG((vp->v_iflag & VI_CLEANERREF) == 0, "vp %p", vp);
+ vp->v_iflag |= VI_CLEANERREF;
+}
+
+static bool
+vcache_rele_cleaner(vnode_t *vp)
+{
+
+ KASSERT(mutex_owned(vp->v_interlock));
+ KASSERT((vp->v_mount->mnt_iflag & IMNT_CLEANING) != 0);
+ if ((vp->v_iflag & VI_CLEANERREF) != 0) {
+ vp->v_iflag &= ~VI_CLEANERREF;
+ return true;
+ }
+ return false;
+}
+
/*
* Get a vnode / fs node pair by key and return it referenced through vpp.
+ * If requested and the vnode is being cleaned, take a cleaner reference
+ * rather than waiting for the reclamation process to finish.
*/
-int
-vcache_get(struct mount *mp, const void *key, size_t key_len,
- struct vnode **vpp)
+static int
+do_vcache_get(struct mount *mp, const void *key, size_t key_len,
+ struct vnode **vpp, bool cleaner)
{
int error;
uint32_t hash;
@@ -1269,6 +1318,7 @@ again:
/* If found, take a reference or retry. */
if (__predict_true(vip != NULL)) {
+
/*
* If the vnode is loading we cannot take the v_interlock
* here as it might change during load (see uvm_obj_setlock()).
@@ -1284,6 +1334,23 @@ again:
}
vp = VIMPL_TO_VNODE(vip);
mutex_enter(vp->v_interlock);
+
+ /*
+ * If the vnode is being cleaned and the caller asked for a
+ * cleaner reference, take one instead of waiting.
+ */
+ if (__predict_false(vip->vi_state == VS_CLEANING &&
+ cleaner)) {
+ vcache_ref_cleaner(vp);
+ mutex_exit(vp->v_interlock);
+ mutex_exit(&vcache_lock);
+ *vpp = vp;
+ return 0;
+ }
+
+ /*
+ * Otherwise take a normal reference.
+ */
mutex_exit(&vcache_lock);
error = vcache_vget(vp);
if (error == ENOENT)
@@ -1350,6 +1417,25 @@ again:
}
/*
+ * Get a vnode / fs node pair by key and return it referenced through vpp.
+ */
+int
+vcache_get(struct mount *mp, const void *key, size_t key_len,
+ struct vnode **vpp)
+{
+
+ return do_vcache_get(mp, key, key_len, vpp, false);
+}
+
+int
+vcache_get_cleaner(struct mount *mp, const void *key, size_t key_len,
+ struct vnode **vpp)
+{
+
+ return do_vcache_get(mp, key, key_len, vpp, true);
+}
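Both wrappers only make sense on a mount with IMNT_CLEANING set, which a file system presumably requests in its mount routine before the mount goes live; a hypothetical fragment:

	/*
	 * Ask vcache_reclaim() to pass through VS_CLEANING and allow
	 * cleaner references on this mount.
	 */
	mp->mnt_iflag |= IMNT_CLEANING;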
+
+/*
* Create a new vnode / fs node pair and return it referenced through vpp.
*/
int
@@ -1528,7 +1614,7 @@ vcache_reclaim(vnode_t *vp)
uint32_t hash;
uint8_t temp_buf[64], *temp_key;
size_t temp_key_len;
- bool recycle, active;
+ bool recycle, active, usecleaning;
int error;
active = (vp->v_usecount > 1);
temp_key_len = vip->vi_key.vk_key_len;
+ usecleaning = (vp->v_mount->mnt_iflag & IMNT_CLEANING) != 0;
+
/*
* Prevent the vnode from being recycled or brought into use
- * while we clean it out.
+ * while we clean it out. Use VS_CLEANING if the fs needs it,
+ * otherwise go straight to VS_RECLAIMING.
*/
- VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
+ VSTATE_CHANGE(vp, VS_LOADED, usecleaning ? VS_CLEANING : VS_RECLAIMING);
if (vp->v_iflag & VI_EXECMAP) {
atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
@@ -1581,6 +1670,17 @@ vcache_reclaim(vnode_t *vp)
}
/*
+ * Cleaning is done; move on to reclaiming.
+ * Change state to VS_RECLAIMING if we are not already there.
+ */
+ if (usecleaning) {
+ mutex_enter(vp->v_interlock);
+ KASSERT((vp->v_iflag & VI_CLEANERREF) == 0);
+ VSTATE_CHANGE(vp, VS_CLEANING, VS_RECLAIMING);
+ mutex_exit(vp->v_interlock);
+ }
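The resulting state sequences, as a summary sketch (the final transition to VS_RECLAIMED happens later in vcache_reclaim(), outside this excerpt):

	/*
	 * IMNT_CLEANING set:
	 *   VS_LOADED -> VS_CLEANING       clean-out runs; the cleaner can
	 *                                  still reach the vnode through
	 *                                  vcache_get_cleaner()
	 *   VS_CLEANING -> VS_RECLAIMING   the cleaner reference is gone
	 *                                  (asserted above)
	 *   VS_RECLAIMING -> VS_RECLAIMED  after VOP_RECLAIM
	 *
	 * IMNT_CLEANING clear:
	 *   VS_LOADED -> VS_RECLAIMING -> VS_RECLAIMED, as before.
	 */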
+
+ /*
* Disassociate the underlying file system from the vnode.
* VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
* the vnode, and may destroy the vnode so that VOP_UNLOCK