// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <trace/hooks/mm.h>

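/*
 * Allocate a one-segment bio for swap I/O on @page. map_swap_page()
 * returns the page's slot offset within the swap device in PAGE_SIZE
 * units; the shift by (PAGE_SHIFT - 9) converts that to 512-byte
 * sectors. Returns NULL on allocation failure.
 */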
static struct bio *get_swap_bio(gfp_t gfp_flags,
                                struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
                struct block_device *bdev;

                bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_end_io = end_io;

                bio_add_page(bio, page, thp_size(page), 0);
        }
        return bio;
}

void end_swap_bio_write(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
                 */
                set_page_dirty(page);
                pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}

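/*
 * Completion handler for swap-in bios. On success the page becomes
 * uptodate; either way it is unlocked. If the read was submitted
 * synchronously, bi_private carries a reference to the submitting
 * task: clearing it with WRITE_ONCE() publishes completion to the
 * polling loop in swap_readpage() before the task is woken and the
 * reference dropped.
 */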
static void end_swap_bio_read(struct bio *bio)
{
        struct page *page = bio_first_page_all(bio);
        struct task_struct *waiter = bio->bi_private;

        if (bio->bi_status) {
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                goto out;
        }

        SetPageUptodate(page);
out:
        unlock_page(page);
        WRITE_ONCE(bio->bi_private, NULL);
        bio_put(bio);
        if (waiter) {
                blk_wake_io_task(waiter);
                put_task_struct(waiter);
        }
}

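/*
 * Build the swap extent tree for a regular swapfile so that swap I/O
 * can bypass the filesystem and go straight to the block device. The
 * file is probed one PAGE_SIZE chunk at a time with bmap(): a chunk is
 * usable only if all of its blocks are physically contiguous and the
 * run is PAGE_SIZE-aligned on disk. Chunks failing that test are
 * skipped; a hole (bmap() returning 0) aborts swapon with -EINVAL.
 */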
int generic_swapfile_activate(struct swap_info_struct *sis,
                                struct file *swap_file,
                                sector_t *span)
{
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned blocks_per_page;
        unsigned long page_no;
        unsigned blkbits;
        sector_t probe_block;
        sector_t last_block;
        sector_t lowest_block = -1;
        sector_t highest_block = 0;
        int nr_extents = 0;
        int ret;

        blkbits = inode->i_blkbits;
        blocks_per_page = PAGE_SIZE >> blkbits;

        /*
         * Map all the blocks into the extent tree.  This code doesn't try
         * to be very smart.
         */
        probe_block = 0;
        page_no = 0;
        last_block = i_size_read(inode) >> blkbits;
        while ((probe_block + blocks_per_page) <= last_block &&
            page_no < sis->max) {
                unsigned block_in_page;
                sector_t first_block;

                cond_resched();

                first_block = probe_block;
                ret = bmap(inode, &first_block);
                if (ret || !first_block)
                        goto bad_bmap;

                /*
                 * It must be PAGE_SIZE aligned on-disk
                 */
                if (first_block & (blocks_per_page - 1)) {
                        probe_block++;
                        goto reprobe;
                }

                for (block_in_page = 1; block_in_page < blocks_per_page;
                                        block_in_page++) {
                        sector_t block;

                        block = probe_block + block_in_page;
                        ret = bmap(inode, &block);
                        if (ret || !block)
                                goto bad_bmap;

                        if (block != first_block + block_in_page) {
                                /* Discontiguity */
                                probe_block++;
                                goto reprobe;
                        }
                }

                first_block >>= (PAGE_SHIFT - blkbits);
                if (page_no) {  /* exclude the header page */
                        if (first_block < lowest_block)
                                lowest_block = first_block;
                        if (first_block > highest_block)
                                highest_block = first_block;
                }

                /*
                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
                 */
                ret = add_swap_extent(sis, page_no, 1, first_block);
                if (ret < 0)
                        goto out;
                nr_extents += ret;
                page_no++;
                probe_block += blocks_per_page;
reprobe:
                continue;
        }
        ret = nr_extents;
        *span = 1 + highest_block - lowest_block;
        if (page_no == 0)
                page_no = 1;    /* force Empty message */
        sis->max = page_no;
        sis->pages = page_no - 1;
        sis->highest_bit = page_no - 1;
out:
        return ret;
bad_bmap:
        pr_err("swapon: swapfile has holes\n");
        ret = -EINVAL;
        goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret = 0;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        /*
         * Arch code may have to preserve more data than just the page
         * contents, e.g. memory tags.
         */
        ret = arch_prepare_to_swap(page);
        if (ret) {
                set_page_dirty(page);
                unlock_page(page);
                goto out;
        }
        if (frontswap_store(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
                goto out;
        }
        ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
        return ret;
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (unlikely(PageTransHuge(page)))
                count_vm_event(THP_SWPOUT);
#endif
        count_vm_events(PSWPOUT, thp_nr_pages(page));
}

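/*
 * Swap-out bios are submitted from reclaim context rather than by the
 * task that owns the page, so associate the bio with the block cgroup
 * matching the page's memory cgroup instead of the submitter's.
 */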
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
        struct cgroup_subsys_state *css;

        if (!page->mem_cgroup)
                return;

        rcu_read_lock();
        css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
        bio_associate_blkg_from_css(bio, css);
        rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)         do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

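/*
 * Write one swap-cache page, trying three paths in order:
 *
 *  1. SWP_FS_OPS swapfiles (e.g. swap-over-NFS) go through the
 *     filesystem's ->direct_IO with a synchronous kiocb.
 *  2. bdev_write_page() uses the block driver's ->rw_page method,
 *     which completes without allocating a bio (e.g. zram, brd,
 *     pmem); it fails if the driver provides none.
 *  3. Otherwise allocate and submit a bio, with @end_write_func as
 *     its completion handler.
 */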
int __swap_writepage(struct page *page, struct writeback_control *wbc,
                bio_end_io_t end_write_func)
{
        struct bio *bio;
        int ret;
        struct swap_info_struct *sis = page_swap_info(page);
        bool skip = false;

        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        if (data_race(sis->flags & SWP_FS_OPS)) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;
                struct bio_vec bv = {
                        .bv_page = page,
                        .bv_len = PAGE_SIZE,
                        .bv_offset = 0
                };
                struct iov_iter from;

                iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);

                set_page_writeback(page);
                unlock_page(page);
                ret = mapping->a_ops->direct_IO(&kiocb, &from);
                if (ret == PAGE_SIZE) {
                        trace_android_vh_count_pswpout(sis);
                        count_vm_event(PSWPOUT);
                        ret = 0;
                } else {
                        /*
                         * In the case of swap-over-nfs, this can be a
                         * temporary failure if the system has limited
                         * memory for allocating transmit buffers.
                         * Mark the page dirty to avoid
                         * rotate_reclaimable_page(), and rate-limit the
                         * messages, but do not flag PageError as in the
                         * normal direct-to-bio case, since the failure
                         * may be temporary.
                         */
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                        pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
                                           page_file_offset(page));
                }
                end_page_writeback(page);
                return ret;
        }

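        /*
         * Try the block driver's ->rw_page fast path first; it completes
         * the write without a bio. A nonzero return (e.g. -EOPNOTSUPP
         * when the driver lacks ->rw_page) means we must fall back to
         * the bio path below.
         */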
        ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
        if (!ret) {
                trace_android_vh_count_swpout_vm_event(sis, page, &skip);
                if (!skip)
                        count_swpout_vm_event(page);
                return 0;
        }

        bio = get_swap_bio(GFP_NOIO, page, end_write_func);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
        bio_associate_blkg_from_page(bio, page);
        trace_android_vh_count_swpout_vm_event(sis, page, &skip);
        if (!skip)
                count_swpout_vm_event(page);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(bio);

        return 0;
}

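/*
 * Read one swap-cache page. Four paths, tried in order: frontswap
 * (e.g. zswap), the filesystem's ->readpage for SWP_FS_OPS swapfiles,
 * the block driver's ->rw_page for SWP_SYNCHRONOUS_IO devices, and
 * finally a bio. The bio path is asynchronous unless @synchronous is
 * set, in which case we poll or sleep until end_swap_bio_read() has
 * run.
 */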
int swap_readpage(struct page *page, bool synchronous)
{
        struct bio *bio;
        int ret = 0;
        struct swap_info_struct *sis = page_swap_info(page);
        blk_qc_t qc;
        struct gendisk *disk;
        unsigned long pflags;

        VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageUptodate(page), page);

        /*
         * Count submission time as memory stall. When the device is congested,
         * or the submitting cgroup IO-throttled, submission can be a
         * significant part of overall IO time.
         */
        psi_memstall_enter(&pflags);

        if (frontswap_load(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
                goto out;
        }

        if (data_race(sis->flags & SWP_FS_OPS)) {
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;

                ret = mapping->a_ops->readpage(swap_file, page);
                if (!ret) {
                        trace_android_vh_count_pswpin(sis);
                        count_vm_event(PSWPIN);
                }
                goto out;
        }

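        /*
         * SWP_SYNCHRONOUS_IO devices (e.g. zram) complete reads in the
         * caller's context, so try the driver's ->rw_page method and
         * skip bio allocation entirely. On failure fall through to the
         * bio path.
         */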
        if (sis->flags & SWP_SYNCHRONOUS_IO) {
                ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
                if (!ret) {
                        trace_android_vh_count_pswpin(sis);
                        count_vm_event(PSWPIN);
                        goto out;
                }
        }

        ret = 0;
        bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        disk = bio->bi_disk;
        /*
         * Keep this task valid during swap readpage because the oom killer may
         * attempt to access it in the page fault retry time check.
         */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        if (synchronous) {
                bio->bi_opf |= REQ_HIPRI;
                get_task_struct(current);
                bio->bi_private = current;
        }
        trace_android_vh_count_pswpin(sis);
        count_vm_event(PSWPIN);
        bio_get(bio);
        qc = submit_bio(bio);
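        /*
         * For a synchronous read, wait here until end_swap_bio_read()
         * clears bi_private: setting the task state before the test
         * ensures a completion between the test and the sleep still
         * wakes us. Prefer polling the queue (the bio was marked
         * REQ_HIPRI) and only sleep when blk_poll() makes no progress.
         */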
        while (synchronous) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio->bi_private))
                        break;

                if (!blk_poll(disk->queue, qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);
        bio_put(bio);

out:
        psi_memstall_leave(&pflags);
        return ret;
}

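/*
 * SWP_FS_OPS swapfiles must dirty the page through the filesystem's
 * own ->set_page_dirty so it is tracked for writeback; block-device
 * swap keeps no inode writeback state, so the no-writeback variant
 * suffices.
 */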
int swap_set_page_dirty(struct page *page)
{
        struct swap_info_struct *sis = page_swap_info(page);

        if (data_race(sis->flags & SWP_FS_OPS)) {
                struct address_space *mapping = sis->swap_file->f_mapping;

                VM_BUG_ON_PAGE(!PageSwapCache(page), page);
                return mapping->a_ops->set_page_dirty(page);
        } else {
                return __set_page_dirty_no_writeback(page);
        }
}