android_kernel_motorola_sm6225/fs/gfs2/ops_vm.c
Nick Piggin d00806b183 mm: fix fault vs invalidate race for linear mappings
Fix the race between invalidate_inode_pages and do_no_page.

Andrea Arcangeli identified a subtle race between invalidation of pages from
pagecache with userspace mappings, and do_no_page.

The issue is that invalidation has to shoot down all mappings to the page,
before it can be discarded from the pagecache.  Between shooting down ptes to
a particular page, and actually dropping the struct page from the pagecache,
do_no_page from any process might fault on that page and establish a new
mapping to the page just before it gets discarded from the pagecache.
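
Concretely, the window looks like this (a schematic interleaving, not the
exact call chains):

        CPU 0 (invalidation)                CPU 1 (fault)
        --------------------                -------------
        unmap all ptes to page P
                                            do_no_page()
                                              ->nopage: P is still in the
                                                pagecache, take a reference
                                              map P into userspace
        remove P from the pagecache
        (P->mapping is now NULL)
                                            left with a dangling page
                                            mapped into userspace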

The most common case where such invalidation is used is in file truncation.
This case was catered for by doing a sort of open-coded seqlock between the
file's i_size, and its truncate_count.

Truncation will decrease i_size, then increment truncate_count before
unmapping userspace pages; do_no_page will read truncate_count, then find the
page if it is within i_size, and then check truncate_count under the page
table lock and back out and retry if it had subsequently been changed (ptl
will serialise against unmapping, and ensure a potentially updated
truncate_count is actually visible).
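
In outline, the old protocol in do_no_page() looked something like this (a
condensed, from-memory sketch of the pre-patch mm/memory.c; declarations,
the COW path, and error handling are elided):

retry:
        sequence = mapping->truncate_count;
        smp_rmb();
        new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
        /* ... prepare the pte ... */
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        /*
         * Truncation shrinks i_size and then bumps truncate_count before
         * unmapping ptes.  Seeing a changed count here, under the ptl that
         * serialises against that unmap, means the page just returned may
         * be on its way out: back off and retry.
         */
        if (unlikely(sequence != mapping->truncate_count)) {
                pte_unmap_unlock(page_table, ptl);
                page_cache_release(new_page);
                goto retry;
        }
        set_pte_at(mm, address, page_table, mk_pte(new_page, vma->vm_page_prot));
        pte_unmap_unlock(page_table, ptl);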

Complexity and documentation issues aside, the locking protocol fails in the
case where we would like to invalidate pagecache inside i_size.  do_no_page
can come in at any time, and filemap_nopage is not aware of an invalidation in
progress (as it is when the page lies outside i_size).  The end result is that
dangling (->mapping == NULL) pages that appear to be from a particular file
may be mapped into userspace with nonsense data.  Valid mappings to the same
place will see a different page.

Andrea implemented two working fixes, one using a real seqlock, another using
a page->flags bit.  He also proposed using the page lock in do_no_page, but
that was initially considered too heavyweight.  However, it is not a global or
per-file lock, and the page cacheline is modified in do_no_page to increment
_count and _mapcount anyway, so a further modification should not be a large
performance hit.  Scalability is not an issue.

This patch implements this latter approach.  ->nopage implementations return
with the page locked if it is possible for their underlying file to be
invalidated (in that case, they must set a special vm_flags bit to indicate
so).  do_no_page only unlocks the page after setting up the mapping
completely.  Invalidation is excluded because it holds the page lock during
invalidation of each page (and ensures that the page is not mapped while
holding the lock).
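
Schematically, the two sides now interlock on the page lock (again a sketch
rather than the literal diff; entry and new_page are set up as before):

        /* ->nopage side, e.g. filemap_nopage(), when the vma has
         * VM_CAN_INVALIDATE set: the page is returned locked. */
        page = find_lock_page(mapping, pgoff);

        /* do_no_page side: install the pte, only then unlock. */
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (pte_none(*page_table)) {
                set_pte_at(mm, address, page_table, entry);
                page_add_file_rmap(new_page);
        }
        pte_unmap_unlock(page_table, ptl);
        unlock_page(new_page);

        /* Invalidation (truncate_inode_pages() etc.) takes lock_page() on
         * each page before unmapping and removing it, so it cannot slip in
         * between the pagecache lookup and the pte install above. */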

This also allows significant simplifications in do_no_page, because we have
the page locked in the right place in the pagecache from the start.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 10:04:41 -07:00

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

static struct page *gfs2_private_nopage(struct vm_area_struct *area,
                                        unsigned long address, int *type)
{
        struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);

        /* Flag the inode as mapped, then defer to the generic handler */
        set_bit(GIF_PAGED, &ip->i_flags);
        return filemap_nopage(area, address, type);
}

/* Allocate on-disk blocks to back one page of a shared-writable mapping */
static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned long index = page->index;
        u64 lblock = index << (PAGE_CACHE_SHIFT -
                               sdp->sd_sb.sb_bsize_shift);
        unsigned int blocks = PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift;
        struct gfs2_alloc *al;
        unsigned int data_blocks, ind_blocks;
        unsigned int x;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
        if (error)
                goto out_gunlock_q;

        /* Reserve enough space for the data and indirect blocks */
        gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
        al->al_requested = data_blocks + ind_blocks;

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(sdp, al->al_rgd->rd_length +
                                 ind_blocks + RES_DINODE +
                                 RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        if (gfs2_is_stuffed(ip)) {
                error = gfs2_unstuff_dinode(ip, NULL);
                if (error)
                        goto out_trans;
        }

        /* Walk the page an extent at a time, allocating blocks (new = 1) */
        for (x = 0; x < blocks; ) {
                u64 dblock;
                unsigned int extlen;
                int new = 1;

                error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen);
                if (error)
                        goto out_trans;

                lblock += extlen;
                x += extlen;
        }

        gfs2_assert_warn(sdp, al->al_alloced);

out_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}

/* Fault in a page for a shared-writable mapping, allocating backing
   blocks for it if required */
static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
                                           unsigned long address, int *type)
{
        struct file *file = area->vm_file;
        struct gfs2_file *gf = file->private_data;
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_holder i_gh;
        struct page *result = NULL;
        unsigned long index = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) +
                              area->vm_pgoff;
        int alloc_required;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return NULL;

        set_bit(GIF_PAGED, &ip->i_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);

        error = gfs2_write_alloc_required(ip, (u64)index << PAGE_CACHE_SHIFT,
                                          PAGE_CACHE_SIZE, &alloc_required);
        if (error)
                goto out;

        /* GFF_EXLOCK tells readpage that the glock is already held */
        set_bit(GFF_EXLOCK, &gf->f_flags);
        result = filemap_nopage(area, address, type);
        clear_bit(GFF_EXLOCK, &gf->f_flags);
        if (!result || result == NOPAGE_OOM)
                goto out;

        if (alloc_required) {
                error = alloc_page_backing(ip, result);
                if (error) {
                        /*
                         * With VM_CAN_INVALIDATE set, filemap_nopage
                         * returned the page locked; drop that lock
                         * before releasing our reference.
                         */
                        if (area->vm_flags & VM_CAN_INVALIDATE)
                                unlock_page(result);
                        page_cache_release(result);
                        result = NULL;
                        goto out;
                }
                set_page_dirty(result);
        }

out:
        gfs2_glock_dq_uninit(&i_gh);
        return result;
}

struct vm_operations_struct gfs2_vm_ops_private = {
        .nopage = gfs2_private_nopage,
};

struct vm_operations_struct gfs2_vm_ops_sharewrite = {
        .nopage = gfs2_sharewrite_nopage,
};
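
For context, these two operation tables are selected at mmap() time by
gfs2_mmap() in fs/gfs2/ops_file.c, which, as of this commit, should also set
VM_CAN_INVALIDATE so that filemap_nopage() returns the page locked.  Roughly
(a from-memory sketch, not the verbatim function; glock handling is elided):

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* ... take the inode glock shared while wiring up the vma ... */

        /* Shared-writable mappings may need block allocation at fault
           time; everything else can use the plain filemap path.  This
           checks VM_MAYWRITE rather than VM_WRITE because mprotect()
           can turn on VM_WRITE later. */
        if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
            (VM_MAYSHARE | VM_MAYWRITE))
                vma->vm_ops = &gfs2_vm_ops_sharewrite;
        else
                vma->vm_ops = &gfs2_vm_ops_private;

        vma->vm_flags |= VM_CAN_INVALIDATE;  /* ->nopage returns locked pages */

        /* ... drop the glock ... */
        return 0;
}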