patch-2.3.99-pre4 linux/mm/filemap.c

diff -u --recursive --new-file v2.3.99-pre3/linux/mm/filemap.c linux/mm/filemap.c
@@ -250,6 +250,11 @@
 		count--;
 
 		dispose = &young;
+
+		/* avoid unscalable SMP locking */
+		if (!page->buffers && page_count(page) > 1)
+			goto dispose_continue;
+
 		if (TryLockPage(page))
 			goto dispose_continue;
 
@@ -260,22 +265,11 @@
 		   page locked down ;). */
 		spin_unlock(&pagemap_lru_lock);
 
-		/* avoid unscalable SMP locking */
-		if (!page->buffers && page_count(page) > 1)
-			goto unlock_noput_continue;
-
-		/* Take the pagecache_lock spinlock held to avoid
-		   other tasks to notice the page while we are looking at its
-		   page count. If it's a pagecache-page we'll free it
-		   in one atomic transaction after checking its page count. */
-		spin_lock(&pagecache_lock);
-
 		/* avoid freeing the page while it's locked */
 		get_page(page);
 
 		/* Is it a buffer page? */
 		if (page->buffers) {
-			spin_unlock(&pagecache_lock);
 			if (!try_to_free_buffers(page))
 				goto unlock_continue;
 			/* page was locked, inode can't go away under us */
@@ -283,9 +277,14 @@
 				atomic_dec(&buffermem_pages);
 				goto made_buffer_progress;
 			}
-			spin_lock(&pagecache_lock);
 		}
 
+		/* Take the pagecache_lock spinlock held to avoid
+		   other tasks to notice the page while we are looking at its
+		   page count. If it's a pagecache-page we'll free it
+		   in one atomic transaction after checking its page count. */
+		spin_lock(&pagecache_lock);
+
 		/*
 		 * We can't free pages unless there's just one user
 		 * (count == 2 because we added one ourselves above).
@@ -294,12 +293,6 @@
 			goto cache_unlock_continue;
 
 		/*
-		 * We did the page aging part.
-		 */
-		if (nr_lru_pages < freepages.min * priority)
-			goto cache_unlock_continue;
-
-		/*
 		 * Is it a page swap page? If so, we want to
 		 * drop it if it is no longer used, even if it
 		 * were to be marked referenced..
@@ -312,8 +305,7 @@
 
 		/* is it a page-cache page? */
 		if (page->mapping) {
-			if (!pgcache_under_min())
-			{
+			if (!PageDirty(page) && !pgcache_under_min()) {
 				remove_page_from_inode_queue(page);
 				remove_page_from_hash_queue(page);
 				page->mapping = NULL;
@@ -329,21 +321,12 @@
 cache_unlock_continue:
 		spin_unlock(&pagecache_lock);
 unlock_continue:
+		spin_lock(&pagemap_lru_lock);
 		UnlockPage(page);
 		put_page(page);
-dispose_relock_continue:
-		/* even if the dispose list is local, a truncate_inode_page()
-		   may remove a page from its queue so always
-		   synchronize with the lru lock while accesing the
-		   page->lru field */
-		spin_lock(&pagemap_lru_lock);
 		list_add(page_lru, dispose);
 		continue;
 
-unlock_noput_continue:
-		UnlockPage(page);
-		goto dispose_relock_continue;
-
 dispose_continue:
 		list_add(page_lru, dispose);
 	}
@@ -484,7 +467,7 @@
 	struct page *alias;
 	unsigned long flags;
 
-	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
+	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
 	page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
 	get_page(page);
 	page->index = offset;
@@ -1724,10 +1707,8 @@
 		error = vma->vm_ops->sync(vma, start, end-start, flags);
 		if (!error && (flags & MS_SYNC)) {
 			struct file * file = vma->vm_file;
-			if (file) {
-				struct dentry * dentry = file->f_dentry;
-				error = file_fsync(file, dentry);
-			}
+			if (file)
+				error = file_fsync(file, file->f_dentry);
 		}
 		return error;
 	}
@@ -2237,9 +2218,9 @@
 
 	down(&current->mm->mmap_sem);
 
-	if (start & ~PAGE_MASK)
+	if (start & ~PAGE_CACHE_MASK)
 		goto out;
-	len = (len + ~PAGE_MASK) & PAGE_MASK;
+	len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
 	end = start + len;
 	if (end < start)
 		goto out;
@@ -2371,8 +2352,7 @@
 }
 
 /*
- * Write to a file through the page cache. This is mainly for the
- * benefit of NFS and possibly other network-based file systems.
+ * Write to a file through the page cache. 
  *
  * We currently put everything into the page cache prior to writing it.
  * This is not a problem when writing full pages. With partial pages,
@@ -2389,8 +2369,7 @@
 ssize_t
 generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 {
-	struct dentry	*dentry = file->f_dentry; 
-	struct inode	*inode = dentry->d_inode; 
+	struct inode	*inode = file->f_dentry->d_inode; 
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
 	loff_t		pos;
