// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

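/*
 * Dump per-bdi writeback statistics and the current dirty thresholds to
 * the per-bdi debugfs "stats" file.
 */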
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

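/*
 * Per-bdi sysfs attributes, exposed under /sys/class/bdi/<name>/:
 * read_ahead_kb, min_ratio, max_ratio and the now-defunct
 * stable_pages_required.
 */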
static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return snprintf(page, PAGE_SIZE-1, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out starts
 * only 'dirty_writeback_interval' centisecs from now anyway, we just set up
 * a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

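/*
 * Initialize a bdi_writeback: its dirty-inode lists, locks, bandwidth
 * estimates and per-cpu statistics. Writebacks other than the embedded
 * root one (wb != &bdi->wb) pin the bdi until wb_exit().
 */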
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_bdi;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

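/* Undo wb_init(): free the per-cpu counters and drop the bdi reference. */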
static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
 * bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
	struct bdi_writeback *wb = container_of(rcu_head,
			struct bdi_writeback, rcu);

	percpu_ref_exit(&wb->refcnt);
	kfree(wb);
}

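/*
 * Final teardown of a cgroup writeback, run from cgwb_release_wq once its
 * last reference is gone: shut the wb down, drop the css and blkcg pins,
 * and free the structure after an RCU grace period.
 */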
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(blkcg);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	wb_exit(wb);
	call_rcu(&wb->rcu, cgwb_free_rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

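/*
 * Allocate and install the cgroup writeback for @memcg_css on @bdi, keyed
 * by the memcg css id in bdi->cgwb_tree and linked into the per-memcg and
 * per-blkcg lists. If the bdi or either cgroup is already dead the wb is
 * unwound again; losing a race against a concurrent insertion (-EEXIST)
 * is not an error.
 */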
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

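/* Set up the cgroup-writeback parts of a freshly initialized bdi. */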
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

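/*
 * Kill and shut down every cgroup writeback of an unregistering bdi.
 * cgwb_release_mutex keeps this from racing with the release work items.
 */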
static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

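/* Common initialization shared by bdi_alloc() and the noop bdi. */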
static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);

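/*
 * Find the bdi_tree slot for @id; the caller must hold bdi_lock.  Returns
 * the link pointer and, via @parentp, the parent node so that a new bdi
 * can be inserted when the id is not present.
 */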
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

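/*
 * Register a bdi: create its device, assign it a fresh id, and hook it
 * into bdi_tree and bdi_list with its root wb marked WB_registered.
 */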
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

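/* Final kref release: unregister the bdi if still registered, then free it. */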
static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);

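/*
 * Global wait queues and counters for the congestion throttling helpers
 * below, one entry per direction (async and sync writeback).
 */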
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &bdi->wb.congested))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &bdi->wb.congested))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) this waits
 * for up to @timeout jiffies for either a BDI to exit congestion of the
 * given @sync queue or a write to complete.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);