-		virtr = vread(buf, (char *)p, count);
-		if (virtr < 0)
-			return virtr;
+		kbuf = (char *)get_free_page(GFP_KERNEL);
+		if (!kbuf)
+			return -ENOMEM;
+		while (count > 0) {
+			int len = count;
+
+			if (len > PAGE_SIZE)
+				len = PAGE_SIZE;
+
+			len = vread(kbuf, (char *)p, len);
+			if (len < 0) {
+				free_page((unsigned long)kbuf);
+				return len;
+			}
+			if (copy_to_user(buf, kbuf, len)) {
+				free_page((unsigned long)kbuf);
+				return -EFAULT;
+			}
+			count -= len;
+			buf += len;
+			virtr += len;
+			p += len;
+		}
+		free_page((unsigned long)kbuf);
 	}
 	*ppos += p + virtr;
 	return virtr + read;
 }
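
Note (not part of the patch): the matching mm/vmalloc.c change is not shown in this excerpt. The bounce buffer above only makes sense if vread() now copies into a kernel buffer with plain stores, rather than put_user() to the caller's buffer, so that the whole vmlist walk can run under vmlist_lock without faulting. A rough sketch of what that locked vread() presumably looks like, assuming the 2.3.x signature long vread(char *buf, char *addr, unsigned long count):

/*
 * Sketch only -- not part of the diff above.  Assumed shape of the
 * mm/vmalloc.c half of this patch: vread() walks vmlist under the new
 * read lock and copies into a kernel buffer, never touching user space.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf++ = '\0';		/* hole between areas reads as zeros */
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf++ = *addr++;	/* plain kernel-to-kernel copy */
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
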
diff -urN -X dontdiff linux/fs/proc/kcore.c vmalloc/fs/proc/kcore.c
--- linux/fs/proc/kcore.c Wed Dec 22 08:54:59 1999
+++ vmalloc/fs/proc/kcore.c Wed Dec 22 09:02:18 1999
@@ -325,13 +325,12 @@
 	size_t elf_buflen;
 	int num_vma;
 
-	/* XXX we need to somehow lock vmlist between here
-	 * and after elf_kcore_store_hdr() returns.
-	 * For now assume that num_vma does not change (TA)
-	 */
+	read_lock(&vmlist_lock);
 	proc_root_kcore.size = size = get_kcore_size(&num_vma, &elf_buflen);
-	if (buflen == 0 || *fpos >= size)
+	if (buflen == 0 || *fpos >= size) {
+		read_unlock(&vmlist_lock);
 		return 0;
+	}
 
 	/* trim buflen to not go beyond EOF */
 	if (buflen > size - *fpos)
@@ -345,10 +344,13 @@
 		if (buflen < tsz)
 			tsz = buflen;
 		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
-		if (!elf_buf)
+		if (!elf_buf) {
+			read_unlock(&vmlist_lock);
 			return -ENOMEM;
+		}
 		memset(elf_buf, 0, elf_buflen);
 		elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
+		read_unlock(&vmlist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
 			return -EFAULT;
@@ -362,7 +364,8 @@
 		/* leave now if filled buffer already */
 		if (buflen == 0)
 			return acc;
-	}
+	} else
+		read_unlock(&vmlist_lock);
 
 	/* where page 0 not mapped, write zeros into buffer */
 #if defined (__i386__) || defined (__mc68000__)
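
Note (not part of the patch): the read lock has to cover both get_kcore_size() and elf_kcore_store_hdr() because the first counts vmlist entries and the second emits exactly that many ELF program headers into elf_buf; if vmlist changed in between, the header would be inconsistent or overrun the buffer. A simplified sketch of the counting side, with details assumed from the 2.3.x fs/proc/kcore.c:

/*
 * Sketch only -- simplified get_kcore_size().  The vmlist walk is the
 * reason the caller must hold vmlist_lock until the ELF header has been
 * written out.
 */
static size_t get_kcore_size(int *num_vma, size_t *elf_buflen)
{
	size_t size = (size_t)high_memory - PAGE_OFFSET;
	size_t try;
	struct vm_struct *m;

	*num_vma = 0;
	for (m = vmlist; m; m = m->next) {	/* walk protected by vmlist_lock */
		try = (size_t)m->addr + m->size;
		if (try > size)
			size = try;
		(*num_vma)++;
	}

	/* one ELF header, one phdr per vm area plus extras, and the notes */
	*elf_buflen = PAGE_ALIGN(sizeof(struct elfhdr) +
				 (*num_vma + 2) * sizeof(struct elf_phdr) +
				 3 * sizeof(struct memelfnote));
	return size + *elf_buflen;
}
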
diff -urN -X dontdiff linux/include/linux/vmalloc.h vmalloc/include/linux/vmalloc.h
--- linux/include/linux/vmalloc.h Tue Dec 21 00:01:18 1999
+++ vmalloc/include/linux/vmalloc.h Wed Dec 22 09:05:56 1999
@@ -3,6 +3,7 @@
@@ -24,6 +25,10 @@
 void vmfree_area_pages(unsigned long address, unsigned long size);
 int vmalloc_area_pages(unsigned long address, unsigned long size);
 
+/* vmlist_lock is a read-write spinlock that protects vmlist
+ * Currently (2.3.34) used in mm/vmalloc.c and fs/proc/kcore.c.
+ */
+extern rwlock_t vmlist_lock;
 extern struct vm_struct * vmlist;
 
 #endif
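
Note (not part of the patch): the extern declaration above implies a matching definition and writer-side locking in mm/vmalloc.c, which this excerpt does not include. A minimal sketch of what that side presumably looks like, using vfree() as the example writer:

/*
 * Sketch only -- assumed mm/vmalloc.c side of this patch.  Readers
 * (vread, kcore) take read_lock(); anything that links or unlinks a
 * vm_struct takes write_lock().
 */
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;

void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);		/* unlinking needs exclusive access */
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			write_unlock(&vmlist_lock);
			/* area is already unlinked, so teardown can run unlocked */
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			kfree(tmp);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
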