/* We need to protect against concurrent writers.. */
down(&inode->i_sem);
+ lock_kernel();
err = file->f_op->fsync(file, dentry);
+ unlock_kernel();
up(&inode->i_sem);
/* this needs further work, at the moment it is identical to fsync() */
down(&inode->i_sem);
+ lock_kernel();
err = file->f_op->fsync(file, dentry);
+ unlock_kernel();
up(&inode->i_sem);
diff -urN -X dontdiff linux/fs/proc/kcore.c vmalloc/fs/proc/kcore.c
--- linux/fs/proc/kcore.c Wed Dec 22 08:54:59 1999
+++ vmalloc/fs/proc/kcore.c Wed Dec 22 09:02:18 1999
@@ -325,13 +325,12 @@
size_t elf_buflen;
int num_vma;
- /* XXX we need to somehow lock vmlist between here
- * and after elf_kcore_store_hdr() returns.
- * For now assume that num_vma does not change (TA)
- */
+ read_lock(&vmlist_lock);
proc_root_kcore.size = size = get_kcore_size(&num_vma, &elf_buflen);
- if (buflen == 0 || *fpos >= size)
+ if (buflen == 0 || *fpos >= size) {
+ read_unlock(&vmlist_lock);
return 0;
+ }
/* trim buflen to not go beyond EOF */
if (buflen > size - *fpos)
@@ -345,10 +344,13 @@
if (buflen < tsz)
tsz = buflen;
elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
- if (!elf_buf)
+ if (!elf_buf) {
+ read_unlock(&vmlist_lock);
return -ENOMEM;
+ }
memset(elf_buf, 0, elf_buflen);
elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
+ read_unlock(&vmlist_lock);
if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
kfree(elf_buf);
return -EFAULT;
@@ -362,7 +364,8 @@
/* leave now if filled buffer already */
if (buflen == 0)
return acc;
- }
+ } else
+ read_unlock(&vmlist_lock);
/* where page 0 not mapped, write zeros into buffer */
#if defined (__i386__) || defined (__mc68000__)
diff -urN -X dontdiff linux/include/linux/mm.h vmalloc/include/linux/mm.h
--- linux/include/linux/mm.h Tue Dec 21 00:01:14 1999
+++ vmalloc/include/linux/mm.h Wed Dec 22 09:05:53 1999
@@ -244,8 +244,14 @@
* The following discussion applies only to them.
*
* A page may belong to an inode's memory mapping. In this case,
- * page->inode is the pointer to the inode, and page->offset is the
- * file offset of the page (not necessarily a multiple of PAGE_SIZE).
+ * page->inode is the pointer to the inode, and page->index is the
+ * file offset of the page in PAGE_CACHE_SIZE (not PAGE_SIZE!) units.
+ * Although currently (2.3.34) PAGE_SIZE == PAGE_CACHE_SIZE, i.e. there
+ * happens to be one page per page cache entry and MM code can't handle
+ * anything else, this may well change. The link to the old page->offset
+ * is given by:
+ *
+ * page->index == (page->offset >> PAGE_CACHE_SHIFT);
*
* A page may have buffers allocated to it. In this case,
* page->buffers is a circular list of these buffer heads. Else,
diff -urN -X dontdiff linux/include/linux/vmalloc.h vmalloc/include/linux/vmalloc.h
--- linux/include/linux/vmalloc.h Tue Dec 21 00:01:18 1999
+++ vmalloc/include/linux/vmalloc.h Wed Dec 22 09:05:56 1999
@@ -3,6 +3,7 @@
@@ -24,6 +25,10 @@
void vmfree_area_pages(unsigned long address, unsigned long size);
int vmalloc_area_pages(unsigned long address, unsigned long size);
+/* vmlist_lock is a read-write spinlock that protects vmlist
+ * Currently (2.3.34) used in mm/vmalloc.c and fs/proc/kcore.c.
+ */
+extern rwlock_t vmlist_lock;
extern struct vm_struct * vmlist;
#endif