Commit ea1754a0 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage


Mostly direct substitution, with occasional adjustment or removal of
outdated comments.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 09cbfeaf
Showing with 32 additions and 36 deletions
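For readers following the series: the parent commit (09cbfeaf) did the tree-wide mechanical replacement and dropped the PAGE_CACHE_* macros and page_cache_get/release(), which had long been one-to-one aliases of PAGE_* and get_page()/put_page(), so everything below is a behavior-preserving respelling, mostly in comments, strings and documentation. A rough sketch of the pattern being applied (the helper is hypothetical, for illustration only):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper showing the respelling; each comment gives the
 * old form that this series removes. */
static void example_put_cached_page(struct page *page, loff_t pos)
{
	pgoff_t index = pos >> PAGE_SHIFT;		/* was PAGE_CACHE_SHIFT */
	unsigned int off = pos & (PAGE_SIZE - 1);	/* was PAGE_CACHE_SIZE */

	pr_debug("releasing page %lu (offset %u)\n", index, off);
	put_page(page);					/* was page_cache_release(page) */
}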
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
which the timestamp reverts to 1970, i.e. moves backwards in time.
Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
== 4096. At least the latter of these is a bug, but it hasn't been
decided what the best fix is. For the moment if you have larger pages
you can just change the #define in mkcramfs.c, so long as you don't
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
default is half of your physical RAM without swap. If you
oversize your tmpfs instances the machine will deadlock
since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
nr_inodes: The maximum number of inodes for this instance. The default
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,
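As a worked example of the size/nr_blocks relation (illustrative values): with the usual 4 KiB PAGE_SIZE, mounting with size=512m is equivalent to nr_blocks=131072, since 2^29 bytes / 2^12 bytes per block = 2^17 = 131072 blocks.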
@@ -708,9 +708,9 @@ struct address_space_operations {
from the address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
-will be PAGE_CACHE_SIZE). Any private data associated with the page
+will be PAGE_SIZE). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0 and
-length is PAGE_CACHE_SIZE, then the private data should be released,
+length is PAGE_SIZE, then the private data should be released,
because the page must be able to be completely discarded. This may
be done by calling the ->releasepage function, but in this case the
release MUST succeed.
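A minimal sketch of the ->invalidatepage contract described above, assuming a filesystem that keeps per-page private data (both helpers are hypothetical, standing in for whatever bookkeeping the filesystem does):

static int example_releasepage(struct page *page, gfp_t gfp);		/* hypothetical */
static void example_trim_private(struct page *page, unsigned int offset,
				 unsigned int length);			/* hypothetical */

static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	/* Full-page invalidation: all private state must be released. */
	if (offset == 0 && length == PAGE_SIZE) {
		/* As the text says, the release MUST succeed here. */
		example_releasepage(page, GFP_NOFS);
		return;
	}
	/* Partial truncate or hole punch: only trim the affected range. */
	example_trim_private(page, offset, length);
}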
@@ -22,7 +22,7 @@
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h> /* for node_online_map */
-#include <linux/pagemap.h> /* for release_pages and page_cache_release */
+#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
#include <asm/pgalloc.h>
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
* the BIO and the offending pages and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
-* here on. It will run one page_cache_release() against each page and will
-* run one bio_put() against the BIO.
+* here on. It will run one put_page() against each page and will run one
+* bio_put() against the BIO.
*/
static void bio_dirty_fn(struct work_struct *work);
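The ownership rule in that comment reduces to the following shape, roughly what the release path could look like (an illustrative sketch, not the kernel's exact code; bio_for_each_segment_all, put_page and bio_put are the real APIs of this era):

static void example_release_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/* One put_page() against each page... */
	bio_for_each_segment_all(bvec, bio, i)
		put_page(bvec->bv_page);
	/* ...and one bio_put() against the BIO. */
	bio_put(bio);
}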
@@ -1327,8 +1327,8 @@ struct bm_extent {
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
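Spelling out the arithmetic in that comment: with typical 4 KiB pages, 256 * PAGE_SIZE = 2^8 * 2^12 = 2^20 bytes, i.e. 1 MiB, which is why the platform-agnostic constant is pinned at (1<<20).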
@@ -514,7 +514,7 @@ typedef struct {
/**
* Starting offset of the fragment within the page. Note that the
* end of the fragment must not pass the end of the page; i.e.,
-* kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+* kiov_len + kiov_offset <= PAGE_SIZE.
*/
unsigned int kiov_offset;
} lnet_kiov_t;
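The invariant in that comment can be checked mechanically; a small sketch (the helper name is hypothetical, the kiov_len/kiov_offset fields are the real ones from lnet_kiov_t):

static bool example_kiov_fits_page(const lnet_kiov_t *kiov)
{
	/* The fragment must not run past the end of its page. */
	return kiov->kiov_len + kiov->kiov_offset <= PAGE_SIZE;
}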
@@ -390,8 +390,8 @@ typedef struct sfw_test_instance {
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
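To make the trailing-bytes remark concrete (assuming the packed id is 12 bytes, a 64-bit NID plus a 32-bit PID): a 4 KiB page would hold 4096 / 12 = 341 ids, leaving 4096 - 341 * 12 = 4 unused trailing bytes, which is the waste the XXX comment refers to.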
@@ -1118,7 +1118,7 @@ struct lu_context_key {
{ \
type *value; \
\
-CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+CLASSERT(PAGE_SIZE >= sizeof (*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
@@ -1022,10 +1022,10 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
-* It's different than PAGE_CACHE_SIZE because the client needs to
+* It's different than PAGE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to know find the
-* lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+* lu_dirpage header is if client and server PAGE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
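Concretely, LU_PAGE_SIZE is fixed at 1UL << 12 = 4096 bytes regardless of the host, so one host page holds PAGE_SIZE >> LU_PAGE_SHIFT lu_dirpages: 1 on a 4 KiB-page client, 16 on a 64 KiB-page client. Fixing the on-wire unit is what lets client and server locate lu_dirpage headers even when their PAGE_SIZE differ.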
@@ -112,8 +112,8 @@
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
@@ -272,7 +272,7 @@ struct client_obd {
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
-* the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+* the extent size. A chunk is max(PAGE_SIZE, OST block size)
*/
int cl_chunkbits;
int cl_chunk;
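For example (illustrative numbers): with a 4 KiB client PAGE_SIZE and a 64 KiB OST block size, a chunk is max(4 KiB, 64 KiB) = 64 KiB, i.e. cl_chunkbits would be 16.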
@@ -134,9 +134,8 @@
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
-* in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
-* lu_dirpage for this integrated page will be adjusted. See
-* lmv_adjust_dirpages().
+* in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
+* for this integrated page will be adjusted. See lmv_adjust_dirpages().
*
*/
@@ -521,7 +521,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
* striped over, rather than having a constant value for all files here.
*/
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
* Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
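Working the intended formula with illustrative numbers: for a file striped in 1 MiB blocks, inode->i_blkbits would be 20, so on a 4 KiB-page host the step would be 1UL << (20 - 12) = 256 pages = 1 MiB, the same value the temporary constant hard-codes.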
@@ -512,7 +512,7 @@ static int vvp_io_read_start(const struct lu_env *env,
vio->cui_ra_window_set = 1;
bead->lrr_start = cl_index(obj, pos);
/*
-* XXX: explicit PAGE_CACHE_SIZE
+* XXX: explicit PAGE_SIZE
*/
bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
ll_ra_read_in(file, bead);
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
-* However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+* However, on hosts where the native VM page size (PAGE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
-int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+int ncfspgs; /* pages read in PAGE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
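The adjustment described in the hunks above amounts to visiting every LU_PAGE_SIZE-sized sub-page packed into each host page; a simplified sketch of the loop structure (illustrative only, not the actual lmv_adjust_dirpages body; struct lu_dirpage is the real on-wire header type):

/* Sketch: visit each lu_dirpage inside ncfspgs host pages. */
static void example_walk_lu_dirpages(struct page **pages, int ncfspgs)
{
	int per_host = PAGE_SIZE >> LU_PAGE_SHIFT;
	int i, j;

	for (i = 0; i < ncfspgs; i++) {
		void *addr = kmap(pages[i]);

		for (j = 0; j < per_host; j++) {
			struct lu_dirpage *dp = addr + (j << LU_PAGE_SHIFT);

			(void)dp;	/* hash-range / lde_reclen stitching elided */
		}
		kunmap(pages[i]);
	}
}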
@@ -47,7 +47,6 @@
#include "../../include/lustre/lustre_idl.h"
#include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
{
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
-* 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+* 2. blocksize at OST is less than PAGE_SIZE and a partial page was
* written. In this case OST may use less chunks to serve this partial
* write. OSTs don't actually know the page size on the client side. so
* clients have to calculate lost grant by the blocksize on the OST.
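Roughly, with illustrative numbers for case 2: given a 4 KiB client page and a 1 KiB OST blocksize, a write dirtying only 1 KiB of a page costs the OST a single 1 KiB block, while the client reserved grant for the full 4 KiB page; the 3 KiB difference is the lost grant the client must compute from the OST blocksize.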
@@ -3039,13 +3039,13 @@ int btrfsic_mount(struct btrfs_root *root,
if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
"btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
"btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
root->nodesize, PAGE_SIZE);
return -1;
}
if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
"btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
"btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
root->sectorsize, PAGE_SIZE);
return -1;
}
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
goto done;
}
/*
-* delalloc_end is already one less than the total
-* length, so we don't subtract one from
-* PAGE_CACHE_SIZE
+* delalloc_end is already one less than the total length, so
+* we don't subtract one from PAGE_SIZE
*/
delalloc_to_write += (delalloc_end - delalloc_start +
-PAGE_SIZE) >>
-PAGE_SHIFT;
+PAGE_SIZE) >> PAGE_SHIFT;
delalloc_start = delalloc_end + 1;
}
if (wbc->nr_to_write < delalloc_to_write) {
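To see why no extra 1 is subtracted: delalloc_end is inclusive, so a range covering exactly two 4 KiB pages has delalloc_start = 0 and delalloc_end = 8191, and (8191 - 0 + 4096) >> 12 = 12287 >> 12 = 2 pages, as expected.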