file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
ra->ra_pages = mapping->backing_dev_info->ra_pages;
- ra->prev_index = -1;
+ ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
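Initializing prev_pos to -1 preserves the sentinel behavior of the old prev_index = -1: shifted down to a page index, it wraps to the largest pgoff_t, so the offset half of the sequential test below succeeds only for a first read that starts at page 0, which is deliberately treated as sequential. A minimal standalone sketch of that arithmetic (a hypothetical test program, not part of the patch; it assumes 4 KiB pages, i.e. PAGE_CACHE_SHIFT == 12, and gcc's arithmetic right shift for negative values, which the kernel relies on):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	long long prev_pos = -1;	/* as set by file_ra_state_init() */
	/* shifting a negative value is implementation-defined in C;
	 * gcc keeps the sign, so this wraps to ULONG_MAX on conversion */
	unsigned long prev_offset = prev_pos >> PAGE_CACHE_SHIFT;
	unsigned long offset;

	for (offset = 0; offset < 3; offset++)
		printf("offset %lu: sequential = %d\n",
		       offset, offset - prev_offset <= 1UL);
	/* prints 1 for offset 0 (a first read at the start of the file
	 * is treated as sequential), 0 for offsets 1 and 2 */
	return 0;
}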
* indicator. The flag won't be set on already cached pages, to avoid the
* readahead-for-nothing fuss, saving pointless page cache lookups.
*
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
* It should be maintained by the caller, and will be used for detecting
* small random reads. Note that the readahead algorithm checks loosely
* for sequential patterns. Hence interleaved reads might be served as
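For reference, the caller-side maintenance this comment asks for boils down to packing the last visited page index and the byte offset within that page into the single loff_t; elsewhere in this patch the generic read path is updated along these lines. A condensed sketch (ra_record_pos is a hypothetical helper name, not an identifier from the patch):

#include <linux/fs.h>		/* struct file_ra_state, loff_t */
#include <linux/pagemap.h>	/* PAGE_CACHE_SHIFT */

/* Hypothetical helper: record the last visited byte after servicing a
 * read. offset is the byte offset within the page, assumed to be
 * below PAGE_CACHE_SIZE. */
static inline void ra_record_pos(struct file_ra_state *ra,
				 pgoff_t index, unsigned long offset)
{
	ra->prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;	/* page -> bytes */
	ra->prev_pos |= offset;
}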
bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
- int max; /* max readahead pages */
- int sequential;
-
- max = ra->ra_pages;
- sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+ int max = ra->ra_pages; /* max readahead pages */
+ pgoff_t prev_offset;
+ int sequential;

/*
* It's the expected callback offset, assume sequential access.
goto readit;
}

+ prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+ sequential = offset - prev_offset <= 1UL || req_size > max;
+
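Two things are worth noting about the relocated test. First, prev_pos counts bytes now, so it is shifted down to a page index before the comparison: assuming 4 KiB pages, a prev_pos of 20480 gives prev_offset = 5, so a request starting at page 5 (re-reading the last visited page) or page 6 (the immediately following page) is classed as sequential, as is any request larger than max pages regardless of position. Second, because the computation now sits below the expected-offset check, the common pipelined case jumps to readit without ever evaluating it.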
/*
* Standalone, small read.
* Read as is, and do not pollute the readahead state.