// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
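/*
 * noop_backing_dev_info is the fallback bdi used where an inode needs a
 * backing device but no real writeback should take place;
 * BDI_CAP_NO_ACCT_AND_WRITEBACK opts it out of dirty accounting and
 * writeback entirely.
 */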

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
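/*
 * The per-bdi stats above are exposed through debugfs; on a typical system
 * they can be read with e.g. "cat /sys/kernel/debug/bdi/8:0/stats" (the
 * "8:0" directory name is just an example device).
 */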

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);
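/*
 * BDI_SHOW() only generates the _show() half; the matching _store() (e.g.
 * read_ahead_kb_store() above) is written by hand.  For instance,
 * BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) below expands to a
 * read_ahead_kb_show() that prints the readahead window in kilobytes plus a
 * DEVICE_ATTR_RW(read_ahead_kb) sysfs attribute.
 */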

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
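/*
 * min_ratio/max_ratio bound this bdi's share of the global dirty limit, as a
 * percentage.  They appear as writable sysfs attributes, so something like
 * "echo 30 > /sys/class/bdi/<dev>/max_ratio" (path shown as an example) caps
 * a slow device at 30% of the dirty memory allowance.
 */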

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return snprintf(page, PAGE_SIZE-1, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
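/*
 * INIT_BW is expressed in pages per second: 100 MB/s is 100 << 20 bytes per
 * second, and shifting right by PAGE_SHIFT converts bytes to pages, hence
 * 100 << (20 - PAGE_SHIFT).  With 4 KiB pages (PAGE_SHIFT == 12) that is
 * 100 << 8 = 25600 pages per second.
 */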

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_bdi;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
 * bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(blkcg);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
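/*
 * The retry loop above covers the race where cgwb_create() loses to a
 * concurrent creator: cgwb_create() converts -EEXIST into 0, so the next
 * wb_get_lookup() pass picks up whichever wb actually made it into
 * bdi->cgwb_tree.
 */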

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
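/*
 * A driver that owns its own bdi typically pairs bdi_alloc() with
 * bdi_register() and a final bdi_put(), roughly (illustrative only, error
 * handling elided, "mydev-%d"/instance are made-up names):
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev-%d", instance);
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */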

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);
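/*
 * The registered name becomes the device name under /sys/class/bdi/ and
 * /sys/kernel/debug/bdi/.  The block layer, for example, registers its bdis
 * as "%u:%u" with the disk's major:minor numbers, which is where names like
 * "8:0" come from.
 */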

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &bdi->wb.congested))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &bdi->wb.congested))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
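/*
 * Drivers mark a bdi congested with set_bdi_congested() when their queue
 * fills up and call clear_bdi_congested() once it drains; the clear path
 * wakes anyone sleeping in congestion_wait()/wait_iff_congested() below.
 */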

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
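/*
 * Memory reclaim is the classic caller here; mm/vmscan.c throttles with
 * calls along the lines of congestion_wait(BLK_RW_ASYNC, HZ/10) when too
 * many pages are stuck under writeback.
 */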

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) this waits
 * for up to @timeout jiffies for either a BDI to exit congestion of the
 * given @sync queue or a write to complete.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);