patch-2.0.34 linux/mm/mmap.c
- Lines: 56
- Date: Wed Jun 3 15:17:50 1998
- Orig file: v2.0.33/linux/mm/mmap.c
- Orig date: Tue Dec 2 13:52:33 1997
diff -u --recursive --new-file v2.0.33/linux/mm/mmap.c linux/mm/mmap.c
@@ -45,7 +45,7 @@
  * Check that a process has enough memory to allocate a
  * new virtual mapping.
  */
-static inline int vm_enough_memory(long pages)
+int vm_enough_memory(long pages)
 {
 	/*
 	 * stupid algorithm to decide if we have enough memory: while
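
The hunk above only drops the static inline qualifiers, turning vm_enough_memory() into an ordinary global function so that other parts of mm can reuse the same overcommit check (this patch also touches mm/mremap.c). The function body is not part of the hunk; purely as an illustration of the kind of heuristic its comment describes, here is a minimal userspace sketch with invented counters and weighting, not the kernel's actual bookkeeping:

/*
 * Hypothetical sketch of an overcommit heuristic in the spirit of
 * vm_enough_memory(): add up pages that are free or plausibly
 * reclaimable and compare the sum against the request.
 */
#include <stdio.h>

struct mem_state {
	long nr_free_pages;	/* pages on the free list */
	long nr_swap_pages;	/* free swap space, in pages */
	long cache_pages;	/* page/buffer cache, partly reclaimable */
};

static int vm_enough_memory(const struct mem_state *m, long pages)
{
	long freepages = m->nr_free_pages + m->nr_swap_pages;

	freepages += m->cache_pages / 2;	/* assume half the cache can be reclaimed */
	return freepages > pages;
}

int main(void)
{
	struct mem_state m = { 1000, 4000, 2000 };

	printf("request 3000 pages: %s\n",
	       vm_enough_memory(&m, 3000) ? "ok" : "would refuse");
	return 0;
}
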
@@ -147,7 +147,7 @@
 	if ((len = PAGE_ALIGN(len)) == 0)
 		return addr;
 
-	if (len > TASK_SIZE || addr > TASK_SIZE-len)
+	if (len > MAX_USER_ADDR || addr > MAX_USER_ADDR-len)
 		return -EINVAL;
 
 	/* offset overflow? */
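
This hunk in do_mmap() only renames the address-space limit from TASK_SIZE to MAX_USER_ADDR (introduced elsewhere in this patch); the check itself was already written in the overflow-safe form "addr > LIMIT - len" rather than "addr + len > LIMIT", so a huge len cannot wrap the sum past zero and sneak under the limit. A small standalone demonstration of the difference, assuming a 3 GB limit of 0xC0000000 purely for illustration:

/*
 * Why the limit check subtracts len instead of adding it: with 32-bit
 * arithmetic the naive form can wrap and accept an absurd request.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_USER_ADDR 0xC0000000U	/* assumed 3 GB limit, for illustration */

int main(void)
{
	uint32_t addr = 0xBFFF0000U;
	uint32_t len  = 0xF0000000U;	/* absurdly large mapping request */

	/* Naive check: addr + len wraps modulo 2^32 and looks "in range". */
	printf("naive check: %s\n",
	       (uint32_t)(addr + len) > MAX_USER_ADDR ? "rejected" : "accepted (bug!)");

	/* Overflow-safe form, as used in do_mmap() above. */
	printf("safe check:  %s\n",
	       len > MAX_USER_ADDR || addr > MAX_USER_ADDR - len ?
	       "rejected" : "accepted");
	return 0;
}
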
@@ -178,6 +178,9 @@
 			 */
 			if (locks_verify_locked(file->f_inode))
 				return -EAGAIN;
+			/* cevans -- whoops another append-only file flaw */
+			if (IS_APPEND(file->f_inode) && (prot & PROT_WRITE))
+				return -EACCES;
 			/* fall through */
 		case MAP_PRIVATE:
 			if (!(file->f_mode & 1))
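
The three added lines close an append-only hole in the MAP_SHARED path: an append-only file (chattr +a) can only be opened for writing with O_APPEND, so write() may only extend it, but a writable shared mapping would still have allowed existing bytes to be overwritten in place. With this check such a mapping now fails with -EACCES. A userspace sketch of the scenario being rejected; the path is made up and the file would need the append-only attribute set by root:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	/* Append-only files may only be opened for writing with O_APPEND. */
	int fd = open("/tmp/append.log", O_RDWR | O_APPEND);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* On a kernel with the fix above this mmap() fails with EACCES. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Without the fix, this store clobbers existing data despite append-only. */
	memcpy(p, "overwritten!", 12);
	munmap(p, 4096);
	close(fd);
	return 0;
}
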
@@ -306,15 +309,15 @@
 {
 	struct vm_area_struct * vmm;
 
-	if (len > TASK_SIZE)
+	if (len > MAX_USER_ADDR)
 		return 0;
 	if (!addr)
-		addr = TASK_SIZE / 3;
+		addr = MMAP_SEARCH_START;
 	addr = PAGE_ALIGN(addr);
 
 	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
 		/* At this point: (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - len < addr)
+		if (MAX_USER_ADDR - len < addr)
 			return 0;
 		if (!vmm || addr + len <= vmm->vm_start)
 			return addr;
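
In get_unmapped_area() the same MAX_USER_ADDR rename is applied, and the default search for a free region now starts at MMAP_SEARCH_START instead of the hard-coded TASK_SIZE / 3. The loop itself is a first-fit search: walk the address-sorted VMA list and return the first hole of at least len bytes below the limit. A small userspace model of that search, with a made-up region list and limits:

/*
 * First-fit gap search over an address-sorted list of mapped regions,
 * modelled on the loop in get_unmapped_area().  Regions and limits are
 * invented for the example; addresses are assumed page aligned.
 */
#include <stdio.h>

struct region {
	unsigned long start, end;	/* [start, end), like vm_start/vm_end */
};

static const struct region maps[] = {	/* sorted by start address */
	{ 0x40000000, 0x40100000 },
	{ 0x40100000, 0x40300000 },
	{ 0x50000000, 0x50010000 },
};

#define NMAPS		(sizeof(maps) / sizeof(maps[0]))
#define SEARCH_START	0x40000000UL	/* stand-in for MMAP_SEARCH_START */
#define ADDR_LIMIT	0xC0000000UL	/* stand-in for MAX_USER_ADDR */

static unsigned long find_gap(unsigned long len)
{
	unsigned long addr = SEARCH_START;

	for (size_t i = 0; ; i++) {
		if (ADDR_LIMIT - len < addr)
			return 0;		/* ran out of address space */
		if (i == NMAPS || addr + len <= maps[i].start)
			return addr;		/* hole before this region is big enough */
		addr = maps[i].end;		/* skip past the region and retry */
	}
}

int main(void)
{
	printf("1 MB request fits at 0x%lx\n", find_gap(0x100000));
	return 0;
}
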
@@ -794,7 +797,7 @@
 {
 	struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
 
-	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
+	if ((addr & ~PAGE_MASK) || addr > MAX_USER_ADDR || len > MAX_USER_ADDR-addr)
 		return -EINVAL;
 
 	if ((len = PAGE_ALIGN(len)) == 0)
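
The do_munmap() hunk applies the same rename to its sanity checks: the address must be page aligned (no bits set below PAGE_MASK) and the [addr, addr+len) range must fit under MAX_USER_ADDR, again written so the subtraction cannot overflow. A short sketch of the page-mask arithmetic involved, assuming a 4 KB page size:

/*
 * Page-alignment arithmetic as used in the do_munmap() checks.
 * A 4 KB page size is assumed here for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* clears the low 12 bits */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x8048123UL;

	/* Non-zero low bits mean the address is not page aligned: -EINVAL. */
	printf("addr & ~PAGE_MASK = 0x%lx\n", addr & ~PAGE_MASK);

	/* PAGE_ALIGN rounds a length up to a whole number of pages. */
	printf("PAGE_ALIGN(5000) = %lu\n", PAGE_ALIGN(5000UL));
	return 0;
}
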