diff -urN -X dontdiff linux/drivers/char/mem.c vmalloc/drivers/char/mem.c
--- linux/drivers/char/mem.c    Thu Dec 16 21:59:38 1999
+++ vmalloc/drivers/char/mem.c  Wed Dec 22 09:02:18 1999
@@ -226,7 +226,8 @@
{
       unsigned long p = *ppos;
       ssize_t read = 0;
-       ssize_t virtr;
+       ssize_t virtr = 0;
+       char *kbuf; /* kernel address: vread() takes the vmlist_lock rwlock */

       if (p < (unsigned long) high_memory) {
               read = count;
@@ -238,22 +239,45 @@
               if (p < PAGE_SIZE && read > 0) {
                       size_t tmp = PAGE_SIZE - p;
                       if (tmp > read) tmp = read;
-                       clear_user(buf, tmp);
+                       if (clear_user(buf, tmp))
+                               return -EFAULT;
                       buf += tmp;
                       p += tmp;
                       read -= tmp;
                       count -= tmp;
               }
#endif
-               copy_to_user(buf, (char *)p, read);
+       if (copy_to_user(buf, (char *)p, read))
+                       return -EFAULT;
               p += read;
               buf += read;
               count -= read;
       }

-       virtr = vread(buf, (char *)p, count);
-       if (virtr < 0)
-               return virtr;
+       kbuf = (char *)get_free_page(GFP_KERNEL);
+       if (!kbuf)
+               return -ENOMEM;
+       while (count > 0) {
+               int len = count;
+
+               if (len > PAGE_SIZE)
+                       len = PAGE_SIZE;
+
+               len = vread(kbuf, (char *)p, len);
+               if (len < 0) {
+                       free_page((unsigned long)kbuf);
+                       return len;
+               }
+               if (copy_to_user(buf, kbuf, len)) {
+                       free_page((unsigned long)kbuf);
+                       return -EFAULT;
+               }
+               count -= len;
+               buf += len;
+               virtr += len;
+               p += len;
+       }
+       free_page((unsigned long)kbuf);
-       *ppos += p + virtr;
+       *ppos = p;
       return virtr + read;
}
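
The heart of this change is the bounce buffer: vread() now takes vmlist_lock (a spinning rwlock, added in mm/vmalloc.c below), and copy_to_user() may fault and therefore sleep, which is not allowed while a spinlock is held. So data is staged one page at a time through a kernel buffer and copied out to user space only after the lock has been dropped. A minimal sketch of the pattern, with hypothetical names (some_lock, fill_under_lock):

static ssize_t read_locked_data(char *ubuf, size_t count)
{
        char *kbuf = (char *)get_free_page(GFP_KERNEL);
        ssize_t len;

        if (!kbuf)
                return -ENOMEM;
        if (count > PAGE_SIZE)
                count = PAGE_SIZE;

        read_lock(&some_lock);          /* spinlock held: must not sleep */
        len = fill_under_lock(kbuf, count);
        read_unlock(&some_lock);

        if (len > 0 && copy_to_user(ubuf, kbuf, len))   /* may fault: lock dropped */
                len = -EFAULT;
        free_page((unsigned long)kbuf);
        return len;
}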
diff -urN -X dontdiff linux/fs/buffer.c vmalloc/fs/buffer.c
--- linux/fs/buffer.c   Mon Dec 20 22:52:57 1999
+++ vmalloc/fs/buffer.c Wed Dec 22 09:02:18 1999
@@ -345,7 +345,6 @@
       struct inode * inode;
       int err;

-       lock_kernel();
       err = -EBADF;
       file = fget(fd);
       if (!file)
@@ -365,13 +364,14 @@

       /* We need to protect against concurrent writers.. */
       down(&inode->i_sem);
+       lock_kernel();
       err = file->f_op->fsync(file, dentry);
+       unlock_kernel();
       up(&inode->i_sem);

out_putf:
       fput(file);
out:
-       unlock_kernel();
       return err;
}

@@ -382,7 +382,6 @@
       struct inode * inode;
       int err;

-       lock_kernel();
       err = -EBADF;
       file = fget(fd);
       if (!file)
@@ -402,13 +401,14 @@

       /* this needs further work, at the moment it is identical to fsync() */
       down(&inode->i_sem);
+       lock_kernel();
       err = file->f_op->fsync(file, dentry);
+       unlock_kernel();
       up(&inode->i_sem);

out_putf:
       fput(file);
out:
-       unlock_kernel();
       return err;
}
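
Both fsync paths above change the same way: the BKL no longer serializes the whole syscall (fget(), the argument checks, fput()), only the ->fsync() call itself, and it now nests inside i_sem. Taking the BKL under a semaphore is safe because the BKL is dropped automatically whenever its holder sleeps, so down() cannot deadlock against another BKL holder. Schematically (a sketch, not literal kernel code):

/* Before: one big critical section around the whole syscall. */
lock_kernel();
err = fsync_body(fd);                   /* hypothetical stand-in */
unlock_kernel();

/* After: i_sem serializes concurrent writers; the BKL shrinks to ->fsync(). */
down(&inode->i_sem);
lock_kernel();
err = file->f_op->fsync(file, dentry);
unlock_kernel();
up(&inode->i_sem);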

diff -urN -X dontdiff linux/fs/proc/kcore.c vmalloc/fs/proc/kcore.c
--- linux/fs/proc/kcore.c       Wed Dec 22 08:54:59 1999
+++ vmalloc/fs/proc/kcore.c     Wed Dec 22 09:02:18 1999
@@ -325,13 +325,12 @@
       size_t elf_buflen;
       int num_vma;

-       /* XXX we need to somehow lock vmlist between here
-        * and after elf_kcore_store_hdr() returns.
-        * For now assume that num_vma does not change (TA)
-        */
+       read_lock(&vmlist_lock);
       proc_root_kcore.size = size = get_kcore_size(&num_vma, &elf_buflen);
-       if (buflen == 0 || *fpos >= size)
+       if (buflen == 0 || *fpos >= size) {
+               read_unlock(&vmlist_lock);
               return 0;
+       }

       /* trim buflen to not go beyond EOF */
       if (buflen > size - *fpos)
@@ -345,10 +344,13 @@
               if (buflen < tsz)
                       tsz = buflen;
               elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
-               if (!elf_buf)
+               if (!elf_buf) {
+                       read_unlock(&vmlist_lock);
                       return -ENOMEM;
+               }
               memset(elf_buf, 0, elf_buflen);
               elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
+               read_unlock(&vmlist_lock);
               if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
                       kfree(elf_buf);
                       return -EFAULT;
@@ -362,7 +364,8 @@
               /* leave now if filled buffer already */
               if (buflen == 0)
                       return acc;
-       }
+       } else
+               read_unlock(&vmlist_lock);

       /* where page 0 not mapped, write zeros into buffer */
#if defined (__i386__) || defined (__mc68000__)
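
The read lock is held from get_kcore_size() until elf_kcore_store_hdr() returns, so num_vma and the generated ELF header describe a single consistent snapshot of vmlist, which is exactly what the removed XXX comment asked for. It is also why the kmalloc() above must stay GFP_ATOMIC: a GFP_KERNEL allocation may sleep, which is not permitted under a spinlock. With three separate unlock sites, a single exit label would be the usual kernel idiom; a sketch of that alternative shape (not part of the patch):

        read_lock(&vmlist_lock);
        size = get_kcore_size(&num_vma, &elf_buflen);
        if (buflen == 0 || *fpos >= size) {
                ret = 0;
                goto out_unlock;
        }
        elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
        if (!elf_buf) {
                ret = -ENOMEM;
                goto out_unlock;
        }
        /* ... store the ELF header, then fall through ... */
out_unlock:
        read_unlock(&vmlist_lock);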
diff -urN -X dontdiff linux/include/linux/mm.h vmalloc/include/linux/mm.h
--- linux/include/linux/mm.h    Tue Dec 21 00:01:14 1999
+++ vmalloc/include/linux/mm.h  Wed Dec 22 09:05:53 1999
@@ -244,8 +244,14 @@
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
- * page->inode is the pointer to the inode, and page->offset is the
- * file offset of the page (not necessarily a multiple of PAGE_SIZE).
+ * page->inode is the pointer to the inode, and page->index is the
+ * file offset of the page in PAGE_CACHE_SIZE (not PAGE_SIZE!) units.
+ * Although currently (2.3.34) PAGE_SIZE == PAGE_CACHE_SIZE, i.e. there
+ * happens to be one page per page cache entry and the MM code can't handle
+ * anything else, this may well change. The link to the old page->offset
+ * is given by:
+ *
+ *         page->index == (page->offset >> PAGE_CACHE_SHIFT);
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Else,
diff -urN -X dontdiff linux/include/linux/vmalloc.h vmalloc/include/linux/vmalloc.h
--- linux/include/linux/vmalloc.h       Tue Dec 21 00:01:18 1999
+++ vmalloc/include/linux/vmalloc.h     Wed Dec 22 09:05:56 1999
@@ -3,6 +3,7 @@

#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/spinlock.h>

#include <asm/pgtable.h>

@@ -24,6 +25,10 @@
void vmfree_area_pages(unsigned long address, unsigned long size);
int vmalloc_area_pages(unsigned long address, unsigned long size);

+/* vmlist_lock is a read-write spinlock that protects vmlist.
+ * Currently (2.3.34) used in mm/vmalloc.c and fs/proc/kcore.c.
+ */
+extern rwlock_t vmlist_lock;
extern struct vm_struct * vmlist;
#endif
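
In other words: every walker of vmlist becomes a reader and every mutation of it a writer; readers may run concurrently, writers are exclusive. A minimal sketch of the rwlock API this relies on (my_lock and the two functions are illustrative):

#include <linux/spinlock.h>

rwlock_t my_lock = RW_LOCK_UNLOCKED;

/* readers may run concurrently with each other */
void walk_list(void)
{
        read_lock(&my_lock);
        /* ... traverse the shared structure ... */
        read_unlock(&my_lock);
}

/* a writer excludes readers and other writers */
void modify_list(void)
{
        write_lock(&my_lock);
        /* ... insert or remove an element ... */
        write_unlock(&my_lock);
}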

diff -urN -X dontdiff linux/ipc/util.c vmalloc/ipc/util.c
--- linux/ipc/util.c    Tue Dec 14 18:01:21 1999
+++ vmalloc/ipc/util.c  Wed Dec 22 11:03:35 1999
@@ -161,25 +161,19 @@
void* ipc_alloc(int size)
{
       void* out;
-       if(size > PAGE_SIZE) {
-               lock_kernel();
+       if(size > PAGE_SIZE)
               out = vmalloc(size);
-               unlock_kernel();
-       } else {
+       else
               out = kmalloc(size, GFP_KERNEL);
-       }
       return out;
}

void ipc_free(void* ptr, int size)
{
-       if(size > PAGE_SIZE) {
-               lock_kernel();
+       if(size > PAGE_SIZE)
               vfree(ptr);
-               unlock_kernel();
-       } else {
+       else
               kfree(ptr);
-       }
}

/*
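
This hunk follows from the mm/vmalloc.c change below: once vmalloc() and vfree() take vmlist_lock internally, the BKL around them in ipc_alloc()/ipc_free() protects nothing and can simply go. From the caller's side (illustrative size):

/* No lock_kernel() needed around large IPC allocations any more. */
void *buf = ipc_alloc(64 * 1024);       /* > PAGE_SIZE, so the vmalloc() path */
if (buf) {
        /* ... use the buffer ... */
        ipc_free(buf, 64 * 1024);
}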
diff -urN -X dontdiff linux/mm/vmalloc.c vmalloc/mm/vmalloc.c
--- linux/mm/vmalloc.c  Sat Nov 20 18:09:05 1999
+++ vmalloc/mm/vmalloc.c        Wed Dec 22 09:03:36 1999
@@ -3,14 +3,17 @@
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *  SMP-threaded vmalloc/vfree/ioremap, Tigran Aivazian, Dec 1999
 */

#include <linux/malloc.h>
#include <linux/vmalloc.h>
+#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

+rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist = NULL;

static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
@@ -162,11 +165,13 @@
       if (!area)
               return NULL;
       addr = VMALLOC_START;
+       write_lock(&vmlist_lock);
       for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
               if (size + addr < (unsigned long) tmp->addr)
                       break;
               addr = tmp->size + (unsigned long) tmp->addr;
               if (addr > VMALLOC_END-size) {
+                       write_unlock(&vmlist_lock);
                       kfree(area);
                       return NULL;
               }
@@ -176,6 +181,7 @@
       area->size = size + PAGE_SIZE;
       area->next = *p;
       *p = area;
+       write_unlock(&vmlist_lock);
       return area;
}

@@ -189,14 +195,17 @@
               printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
               return;
       }
+       write_lock(&vmlist_lock);
       for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
               if (tmp->addr == addr) {
                       *p = tmp->next;
                       vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
+                       write_unlock(&vmlist_lock);
                       kfree(tmp);
                       return;
               }
       }
+       write_unlock(&vmlist_lock);
       printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}

@@ -224,16 +233,17 @@
       return addr;
}

-long vread(char *buf, char *addr, unsigned long count)
+long vread(char *kbuf, char *addr, unsigned long count)
{
       struct vm_struct *tmp;
-       char *vaddr, *buf_start = buf;
+       char *vaddr, *buf_start = kbuf;
       unsigned long n;

       /* Don't allow overflow */
       if ((unsigned long) addr + count < count)
               count = -(unsigned long) addr;

+       read_lock(&vmlist_lock);
       for (tmp = vmlist; tmp; tmp = tmp->next) {
               vaddr = (char *) tmp->addr;
               if (addr >= vaddr + tmp->size - PAGE_SIZE)
@@ -241,8 +251,8 @@
               while (addr < vaddr) {
                       if (count == 0)
                               goto finished;
-                       put_user('\0', buf);
-                       buf++;
+                       *kbuf = '\0';
+                       kbuf++;
                       addr++;
                       count--;
               }
@@ -250,12 +260,13 @@
               do {
                       if (count == 0)
                               goto finished;
-                       put_user(*addr, buf);
-                       buf++;
+                       *kbuf = *addr;
+                       kbuf++;
                       addr++;
                       count--;
               } while (--n > 0);
       }
finished:
-       return buf - buf_start;
+       read_unlock(&vmlist_lock);
+       return kbuf - buf_start;
}
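
Since vread() now fills a caller-supplied kernel buffer instead of calling put_user(), it can never fault while vmlist_lock is held; any caller must do its own copy-out after the lock is gone, exactly as the read_kmem() loop above does. A hedged sketch of such a caller (copy_vmalloc_page() is hypothetical):

long copy_vmalloc_page(char *ubuf, char *vaddr, unsigned long count)
{
        char *kbuf = (char *)get_free_page(GFP_KERNEL);
        long n;

        if (!kbuf)
                return -ENOMEM;
        if (count > PAGE_SIZE)
                count = PAGE_SIZE;
        n = vread(kbuf, vaddr, count);  /* takes vmlist_lock internally */
        if (n > 0 && copy_to_user(ubuf, kbuf, n))
                n = -EFAULT;
        free_page((unsigned long)kbuf);
        return n;
}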