// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}
/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, or the error returned by @filler otherwise.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
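
/*
 * Illustrative usage sketch (not part of the upstream file; myfs_filler and
 * myfs_do_readpage below are hypothetical names): a filesystem can drive
 * read_cache_pages() from its ->readpages() implementation with a per-page
 * filler, roughly:
 *
 *	static int myfs_filler(void *data, struct page *page)
 *	{
 *		return myfs_do_readpage(data, page);
 *	}
 *
 *	read_cache_pages(mapping, pages, myfs_filler, file);
 *
 * Each page that is successfully added to the page cache is passed to the
 * filler; a non-zero return aborts and releases the rest of the list.
 */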

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
			mapping->a_ops->readpage(filp, page);
		put_page(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}
/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	unsigned int nr_pages = 0;
	loff_t isize = i_size_read(inode);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	if (isize == 0)
		return;

	end_index = ((isize - 1) >> PAGE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = xa_load(&mapping->i_pages, page_offset);
		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch of
			 * contiguous pages before continuing with the next
			 * batch.
			 */
			if (nr_pages)
				read_pages(mapping, filp, &page_pool, nr_pages,
						gfp_mask);
			nr_pages = 0;
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (nr_pages)
		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
	BUG_ON(!list_empty(&page_pool));
}
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	struct file_ra_state *ra = &filp->f_ra;
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min(nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
}
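
/*
 * For illustration (an assumed typical configuration, not upstream
 * documentation): with 4K pages, this_chunk above is 512 pages, so a request
 * of 1024 pages (already clamped to max_pages) is issued as two 512-page
 * __do_page_cache_readahead() calls rather than one large allocation burst.
 */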

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then quadruple it for small requests (<= max/32), double it for medium
 * ones (<= max/4), and clamp it to max otherwise.
 * E.g. for a 128k (32 page) max ra: a 4 page read gets a 32k (8 page)
 * initial window, a 16 page read gets the full 128k.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
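
/*
 * Worked example (illustrative only, assuming max = 32 pages, i.e. 128k with
 * 4K pages): get_init_ra_size(4, 32) above returns 8, and successive
 * get_next_ra_size() calls then ramp the window 8 -> 16 -> 32, after which
 * it stays clamped at max.
 */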

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

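/*
 * Illustrative trace (an example built on the code above, not upstream
 * documentation), assuming ra_pages = 32 and an application doing 4-page
 * sequential reads:
 *
 *   1. miss at page 0: initial window start=0 size=8 async_size=4,
 *      PG_readahead is set on page 4 and pages 0-7 are read;
 *   2. the reader reaches marker page 4: async readahead advances the window
 *      to start=8 size=16 async_size=16, marker on page 8;
 *   3. reaching page 8 advances it to start=24 size=32 async_size=32, marker
 *      on page 24, and the window then stays at the 32-page max.
 *
 * I/O for the next window is thus in flight while the application is still
 * consuming the current one.
 */
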
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max.
 * This count is a conservative estimate of
 * 	- the length of the sequential read sequence, or
 * 	- the thrashing threshold in memory-tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

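/*
 * Illustrative example (an assumption-based sketch, not upstream
 * documentation): with max = 32, a read at offset 200 with req_size = 8
 * whose preceding pages are all cached makes count_history_pages() report
 * more history than the request, so the state becomes start=200,
 * size=min(history + 8, 32) = 32, async_size=1 - a full window with the
 * readahead marker on its last page.
 */
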
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *filp,
		bool hit_readahead_marker, pgoff_t offset,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	pgoff_t prev_offset;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(mapping, offset + 1, max_pages);
		rcu_read_unlock();

		if (!start || start - offset > max_pages)
			return;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	__do_page_cache_readahead(mapping, filp, offset, req_size, 0);
	return;

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss has
 * occurred: it will submit the read.  The readahead logic may decide to
 * piggyback more pages onto the read request if access patterns suggest it
 * will improve performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	if (blk_cgroup_congested())
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
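
/*
 * Illustrative caller sketch (not part of this file): a buffered read path
 * such as generic_file_buffered_read() typically drives the two entry points
 * above roughly like this:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page)
 *		page_cache_sync_readahead(mapping, ra, filp, index,
 *					  last_index - index);
 *	else if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp, page, index,
 *					   last_index - index);
 *
 * i.e. a sync readahead on a cache miss, and an async one when the
 * PG_readahead marker page is reached.
 */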

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}