diff -urpNX dontdiff linux-2.5.24/fs/buffer.c linux-2.5.24-flock/fs/buffer.c
--- linux-2.5.24/fs/buffer.c    Thu Jun 20 16:53:42 2002
+++ linux-2.5.24-flock/fs/buffer.c      Wed Jul  3 13:45:49 2002
@@ -685,8 +685,7 @@ void buffer_insert_list(spinlock_t *lock
               struct buffer_head *bh, struct list_head *list)
{
       spin_lock(lock);
-       list_del(&bh->b_assoc_buffers);
-       list_add(&bh->b_assoc_buffers, list);
+       list_move(&bh->b_assoc_buffers, list);
       spin_unlock(lock);
}

@@ -864,12 +863,12 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
- * Do this in two main stages: first we copy dirty buffers to a
- * temporary inode list, queueing the writes as we go.  Then we clean
- * up, waiting for those writes to complete.
+ * Do this in three stages: first we move the dirty buffers to a
+ * temporary list, second we queue the writes (moving each buffer to
+ * a wait list as we go), and third we wait for the writes to complete.
 *
- * During this second stage, any subsequent updates to the file may end
- * up refiling the buffer on the original inode's dirty list again, so
+ * During the second or third stage, a concurrent update to the file
+ * may refile a buffer back on the original inode's dirty list, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
@@ -878,29 +877,49 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
       struct buffer_head *bh;
-       struct list_head tmp;
+       LIST_HEAD(pending);
+       LIST_HEAD(wait);
       int err = 0, err2;

-       INIT_LIST_HEAD(&tmp);
-
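+       /*
+        * Stage one: move the dirty and locked buffers over to a
+        * private "pending" list, so that buffers dirtied while we
+        * work cannot keep us looping.
+        */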
       spin_lock(lock);
       while (!list_empty(list)) {
               bh = BH_ENTRY(list->next);
               list_del_init(&bh->b_assoc_buffers);
               if (buffer_dirty(bh) || buffer_locked(bh)) {
-                       list_add(&bh->b_assoc_buffers, &tmp);
-                       if (buffer_dirty(bh)) {
-                               get_bh(bh);
-                               spin_unlock(lock);
-                               ll_rw_block(WRITE, 1, &bh);
-                               brelse(bh);
-                               spin_lock(lock);
-                       }
+                       list_add(&bh->b_assoc_buffers, &pending);
+               }
+       }
+
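+       /*
+        * Stage two: queue the writes.  A concurrent update may
+        * refile a buffer whenever we drop the lock, so move each
+        * buffer to the wait list up front instead of walking with
+        * a list cursor that a refile would invalidate.
+        */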
+       while (!list_empty(&pending)) {
+               bh = BH_ENTRY(pending.next);
+               list_move(&bh->b_assoc_buffers, &wait);
+               if (buffer_dirty(bh)) {
+                       get_bh(bh);
+                       spin_unlock(lock);
+                       ll_rw_block(WRITE, 1, &bh);
+                       brelse(bh);
+                       spin_lock(lock);
               }
       }

-       while (!list_empty(&tmp)) {
-               bh = BH_ENTRY(tmp.prev);
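+       /*
+        * Stage three: wait for the writes.  Buffers refiled while
+        * the lock was dropped have left the wait list; the osync
+        * pass below catches them.
+        */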
+       while (!list_empty(&wait)) {
+               bh = BH_ENTRY(wait.prev);
               __remove_assoc_queue(bh);
               get_bh(bh);
               spin_unlock(lock);