// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
        return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
        struct rb_root rb_root;
        struct rb_node *rb_rightmost;
        spinlock_t lock;
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
        /*
         * memcg which the event belongs to.
         */
        struct mem_cgroup *memcg;
        /*
         * eventfd to signal userspace about the event.
         */
        struct eventfd_ctx *eventfd;
        /*
         * Each of these is stored in a list by the cgroup.
         */
        struct list_head list;
        /*
         * register_event() callback will be used to add a new userspace
         * waiter for changes related to this event. Use eventfd_signal()
         * on eventfd to send a notification to userspace.
         */
        int (*register_event)(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args);
        /*
         * unregister_event() callback will be called when userspace closes
         * the eventfd or on cgroup removal. This callback must be set
         * if you want to provide notification functionality.
         */
        void (*unregister_event)(struct mem_cgroup *memcg,
                                 struct eventfd_ctx *eventfd);
        /*
         * All fields below are needed to unregister the event when
         * userspace closes the eventfd.
         */
        poll_table pt;
        wait_queue_head_t *wqh;
        wait_queue_entry_t wait;
        struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t lock; /* for from, to */
        struct mm_struct *mm;
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long flags;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;	/* a task moving charges */
        wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
        _MEM,
        _MEMSWAP,
        _OOM_TYPE,
        _KMEM,
        _TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
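/*
 * Illustrative example of the encoding above (the values follow directly
 * from the macros and the res_type enum): MEMFILE_PRIVATE(_MEMSWAP, 3)
 * packs type 1 (_MEMSWAP) into the high 16 bits and attribute 3 into the
 * low 16 bits, i.e. 0x10003; MEMFILE_TYPE() and MEMFILE_ATTR() then
 * recover 1 and 3 respectively.
 */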
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
        for (iter = mem_cgroup_iter(root, NULL, NULL);	\
             iter != NULL;				\
             iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
        for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
             iter != NULL;				\
             iter = mem_cgroup_iter(NULL, iter, NULL))
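/*
 * Typical usage of the iteration macros above; drop the references with
 * mem_cgroup_iter_break() when leaving the loop early, e.g.:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * (some_condition() is only a placeholder; memcg_expand_shrinker_maps()
 * below uses this pattern with the for_each_mem_cgroup() variant.)
 */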

static inline bool should_force_charge(void)
{
        return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
                (current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)
                memcg = root_mem_cgroup;
        return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
        return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
        struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
        struct mem_cgroup *memcg;
        unsigned int nr_bytes;
        unsigned int nr_pages;
        unsigned long flags;

        /*
         * At this point all allocated objects are freed, and
         * objcg->nr_charged_bytes can't have an arbitrary byte value.
         * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
         *
         * The following sequence can lead to it:
         * 1) CPU0: objcg == stock->cached_objcg
         * 2) CPU1: we do a small allocation (e.g. 92 bytes),
         *          PAGE_SIZE bytes are charged
         * 3) CPU1: a process from another memcg is allocating something,
         *          the stock is flushed,
         *          objcg->nr_charged_bytes = PAGE_SIZE - 92
         * 4) CPU0: we release this object,
         *          92 bytes are added to stock->nr_bytes
         * 5) CPU0: stock is flushed,
         *          92 bytes are added to objcg->nr_charged_bytes
         *
         * As a result, nr_charged_bytes == PAGE_SIZE.
         * This page will be uncharged in obj_cgroup_release().
         */
        nr_bytes = atomic_read(&objcg->nr_charged_bytes);
        WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
        nr_pages = nr_bytes >> PAGE_SHIFT;

        spin_lock_irqsave(&css_set_lock, flags);
        memcg = obj_cgroup_memcg(objcg);
        if (nr_pages)
                __memcg_kmem_uncharge(memcg, nr_pages);
        list_del(&objcg->list);
        mem_cgroup_put(memcg);
        spin_unlock_irqrestore(&css_set_lock, flags);

        percpu_ref_exit(ref);
        kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
        struct obj_cgroup *objcg;
        int ret;

        objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
        if (!objcg)
                return NULL;

        ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
                              GFP_KERNEL);
        if (ret) {
                kfree(objcg);
                return NULL;
        }
        INIT_LIST_HEAD(&objcg->list);
        return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
                                  struct mem_cgroup *parent)
{
        struct obj_cgroup *objcg, *iter;

        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

        spin_lock_irq(&css_set_lock);

        /* Move active objcg to the parent's list */
        xchg(&objcg->memcg, parent);
        css_get(&parent->css);
        list_add(&objcg->list, &parent->objcg_list);

        /* Move already reparented objcgs to the parent's list */
        list_for_each_entry(iter, &memcg->objcg_list, list) {
                css_get(&parent->css);
                xchg(&iter->memcg, parent);
                css_put(&memcg->css);
        }
        list_splice(&memcg->objcg_list, &parent->objcg_list);

        spin_unlock_irq(&css_set_lock);

        percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
        down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
        up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
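/*
 * As the comment above notes, the id array doubles whenever it has to
 * grow, so its size steps through 4, 8, 16, ... starting from
 * MEMCG_CACHES_MIN_SIZE and is capped at MEMCG_CACHES_MAX_SIZE.
 */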

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
        kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
                                         int size, int old_size)
{
        struct memcg_shrinker_map *new, *old;
        int nid;

        lockdep_assert_held(&memcg_shrinker_map_mutex);

        for_each_node(nid) {
                old = rcu_dereference_protected(
                        mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
                /* Not yet online memcg */
                if (!old)
                        return 0;

                new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
                if (!new)
                        return -ENOMEM;

                /* Set all old bits, clear all new bits */
                memset(new->map, (int)0xff, old_size);
                memset((void *)new->map + old_size, 0, size - old_size);

                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
                call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
        }

        return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *pn;
        struct memcg_shrinker_map *map;
        int nid;

        if (mem_cgroup_is_root(memcg))
                return;

        for_each_node(nid) {
                pn = mem_cgroup_nodeinfo(memcg, nid);
                map = rcu_dereference_protected(pn->shrinker_map, true);
                if (map)
                        kvfree(map);
                rcu_assign_pointer(pn->shrinker_map, NULL);
        }
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
        struct memcg_shrinker_map *map;
        int nid, size, ret = 0;

        if (mem_cgroup_is_root(memcg))
                return 0;

        mutex_lock(&memcg_shrinker_map_mutex);
        size = memcg_shrinker_map_size;
        for_each_node(nid) {
                map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
                if (!map) {
                        memcg_free_shrinker_maps(memcg);
                        ret = -ENOMEM;
                        break;
                }
                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
        }
        mutex_unlock(&memcg_shrinker_map_mutex);

        return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
        int size, old_size, ret = 0;
        struct mem_cgroup *memcg;

        size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
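        /*
         * For example, on a 64-bit build (BITS_PER_LONG == 64) a new_id of
         * 100 yields DIV_ROUND_UP(101, 64) * sizeof(unsigned long) == 16
         * bytes, i.e. two longs' worth of bits per node.
         */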
        old_size = memcg_shrinker_map_size;
        if (size <= old_size)
                return 0;

        mutex_lock(&memcg_shrinker_map_mutex);
        if (!root_mem_cgroup)
                goto unlock;

        for_each_mem_cgroup(memcg) {
                if (mem_cgroup_is_root(memcg))
                        continue;
                ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
                if (ret) {
                        mem_cgroup_iter_break(NULL, memcg);
                        goto unlock;
                }
        }
unlock:
        if (!ret)
                memcg_shrinker_map_size = size;
        mutex_unlock(&memcg_shrinker_map_mutex);
        return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
        if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
                struct memcg_shrinker_map *map;

                rcu_read_lock();
                map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
                /* Pairs with smp mb in shrink_slab() */
                smp_mb__before_atomic();
                set_bit(shrinker_id, map->map);
                rcu_read_unlock();
        }
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
        struct mem_cgroup *memcg;

        memcg = page_memcg(page);

        if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                memcg = root_mem_cgroup;

        return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
        struct mem_cgroup *memcg;
        unsigned long ino = 0;

        rcu_read_lock();
        memcg = page_memcg_check(page);

        while (memcg && !(memcg->css.flags & CSS_ONLINE))
                memcg = parent_mem_cgroup(memcg);
        if (memcg)
                ino = cgroup_ino(memcg->css.cgroup);
        rcu_read_unlock();
        return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
        int nid = page_to_nid(page);

        return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
        return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
        int nid = page_to_nid(page);

        return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz,
                                         unsigned long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct mem_cgroup_per_node *mz_node;
        bool rightmost = true;

        if (mz->on_tree)
                return;

        mz->usage_in_excess = new_usage_in_excess;
        if (!mz->usage_in_excess)
                return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_node,
                                        tree_node);
                if (mz->usage_in_excess < mz_node->usage_in_excess) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else {
                        p = &(*p)->rb_right;
                }
        }

        if (rightmost)
                mctz->rb_rightmost = &mz->tree_node;

        rb_link_node(&mz->tree_node, parent, p);
        rb_insert_color(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                         struct mem_cgroup_tree_per_node *mctz)
{
        if (!mz->on_tree)
                return;

        if (&mz->tree_node == mctz->rb_rightmost)
                mctz->rb_rightmost = rb_prev(&mz->tree_node);

        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
                                       struct mem_cgroup_tree_per_node *mctz)
{
        unsigned long flags;

        spin_lock_irqsave(&mctz->lock, flags);
        __mem_cgroup_remove_exceeded(mz, mctz);
        spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
        unsigned long nr_pages = page_counter_read(&memcg->memory);
        unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;

        if (nr_pages > soft_limit)
                excess = nr_pages - soft_limit;

        return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
        unsigned long excess;
        struct mem_cgroup_per_node *mz;
        struct mem_cgroup_tree_per_node *mctz;

        mctz = soft_limit_tree_from_page(page);
        if (!mctz)
                return;
        /*
         * Necessary to update all ancestors when hierarchy is used,
         * because their event counters are not touched.
         */
        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                mz = mem_cgroup_page_nodeinfo(memcg, page);
                excess = soft_limit_excess(memcg);
                /*
                 * We have to update the tree if mz is on RB-tree or
                 * mem is over its softlimit.
                 */
                if (excess || mz->on_tree) {
                        unsigned long flags;

                        spin_lock_irqsave(&mctz->lock, flags);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(mz, mctz, excess);
                        spin_unlock_irqrestore(&mctz->lock, flags);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
        struct mem_cgroup_tree_per_node *mctz;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(memcg, nid);
                mctz = soft_limit_tree_node(nid);
                if (mctz)
                        mem_cgroup_remove_exceeded(mz, mctz);
        }
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

retry:
        mz = NULL;
        if (!mctz->rb_rightmost)
                goto done;		/* Nothing to reclaim from */

        mz = rb_entry(mctz->rb_rightmost,
                      struct mem_cgroup_per_node, tree_node);
        /*
         * Remove the node now but someone else can add it back;
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz, mctz);
        if (!soft_limit_excess(mz->memcg) ||
            !css_tryget(&mz->memcg->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
        struct mem_cgroup_per_node *mz;

        spin_lock_irq(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock_irq(&mctz->lock);
        return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
        long x, threshold = MEMCG_CHARGE_BATCH;

        if (mem_cgroup_disabled())
                return;

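        /*
         * Byte-sized stat items are batched in units of MEMCG_CHARGE_BATCH
         * pages (the threshold is shifted by PAGE_SHIFT below), not bytes.
         */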
        if (memcg_stat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;

        x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
        if (unlikely(abs(x) > threshold)) {
                struct mem_cgroup *mi;

                /*
                 * Batch local counters to keep them in sync with
                 * the hierarchical ones.
                 */
                __this_cpu_add(memcg->vmstats_local->stat[idx], x);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmstats[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
        struct mem_cgroup *parent;

        parent = parent_mem_cgroup(pn->memcg);
        if (!parent)
                return NULL;
        return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val)
{
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup *memcg;
        long x, threshold = MEMCG_CHARGE_BATCH;

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        memcg = pn->memcg;

        /* Update memcg */
        __mod_memcg_state(memcg, idx, val);

        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stat_local->count[idx], val);

        if (vmstat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;

        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > threshold)) {
                pg_data_t *pgdat = lruvec_pgdat(lruvec);
                struct mem_cgroup_per_node *pi;

                for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
                        atomic_long_add(x, &pi->lruvec_stat[idx]);
                x = 0;
        }
        __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val)
{
        /* Update node */
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

        /* Update memcg and lruvec */
        if (!mem_cgroup_disabled())
                __mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
                             int val)
{
        struct page *head = compound_head(page); /* rmap on tail pages */
        struct mem_cgroup *memcg = page_memcg(head);
        pg_data_t *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        /* Untracked pages have no memcg, no lruvec. Update only the node */
        if (!memcg) {
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        __mod_lruvec_state(lruvec, idx, val);
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = page_pgdat(virt_to_page(p));
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = mem_cgroup_from_obj(p);

        /*
         * Untracked pages have no memcg, no lruvec. Update only the
         * node. If we reparent the slab objects to the root memcg,
         * when we free the slab object, we need to update the per-memcg
         * vmstats to keep it correct for the root memcg.
         */
        if (!memcg) {
                __mod_node_page_state(pgdat, idx, val);
        } else {
                lruvec = mem_cgroup_lruvec(memcg, pgdat);
                __mod_lruvec_state(lruvec, idx, val);
        }
        rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count)
{
        unsigned long x;

        if (mem_cgroup_disabled())
                return;

        x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
        if (unlikely(x > MEMCG_CHARGE_BATCH)) {
                struct mem_cgroup *mi;

                /*
                 * Batch local counters to keep them in sync with
                 * the hierarchical ones.
                 */
                __this_cpu_add(memcg->vmstats_local->events[idx], x);
                for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                        atomic_long_add(x, &mi->vmevents[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
        return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_local->events[event], cpu);
        return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         struct page *page,
                                         int nr_pages)
{
        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
                __count_memcg_events(memcg, PGPGIN, 1);
        else {
                __count_memcg_events(memcg, PGPGOUT, 1);
                nr_pages = -nr_pages; /* for event */
        }

        __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                       enum mem_cgroup_events_target target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
        next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
        /* from time_after() in jiffies.h */
        if ((long)(next - val) < 0) {
                switch (target) {
                case MEM_CGROUP_TARGET_THRESH:
                        next = val + THRESHOLDS_EVENTS_TARGET;
                        break;
                case MEM_CGROUP_TARGET_SOFTLIMIT:
                        next = val + SOFTLIMIT_EVENTS_TARGET;
                        break;
                default:
                        break;
                }
                __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
                return true;
        }
        return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_THRESH))) {
                bool do_softlimit;

                do_softlimit = mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_SOFTLIMIT);
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
                        mem_cgroup_update_tree(memcg, page);
        }
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtains a reference on mm->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return NULL;

        rcu_read_lock();
        do {
                /*
                 * Page cache insertions can happen without an
                 * actual mm context, e.g. during disk probing
                 * on boot, loopback IO, acct() writes etc.
                 */
                if (unlikely(!mm))
                        memcg = root_mem_cgroup;
                else {
                        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                        if (unlikely(!memcg))
                                memcg = root_mem_cgroup;
                }
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtains a reference on page->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
        struct mem_cgroup *memcg = page_memcg(page);

        if (mem_cgroup_disabled())
                return NULL;

        rcu_read_lock();
        /* The page should not get uncharged and its memcg freed under us. */
        if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
                memcg = root_mem_cgroup;
        rcu_read_unlock();
        return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

static __always_inline struct mem_cgroup *active_memcg(void)
{
        if (in_interrupt())
                return this_cpu_read(int_active_memcg);
        else
                return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = active_memcg();
        if (memcg) {
                /* current->active_memcg must hold a ref. */
                if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
                        memcg = root_mem_cgroup;
                else
                        memcg = current->active_memcg;
        }
        rcu_read_unlock();

        return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
        /* Allow remote memcg charging from any context. */
        if (unlikely(active_memcg()))
                return false;

        /* Memcg to charge can't be determined. */
        if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
                return true;

        return false;
}

/**
 * If active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
        if (memcg_kmem_bypass())
                return NULL;

        if (unlikely(active_memcg()))
                return get_active_memcg();

        return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
                                   struct mem_cgroup_reclaim_cookie *reclaim)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct cgroup_subsys_state *css = NULL;
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *pos = NULL;

        if (mem_cgroup_disabled())
                return NULL;

        if (!root)
                root = root_mem_cgroup;

        if (prev && !reclaim)
                pos = prev;

        rcu_read_lock();

        if (reclaim) {
                struct mem_cgroup_per_node *mz;

                mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
                iter = &mz->iter;

                if (prev && reclaim->generation != iter->generation)
                        goto out_unlock;

                while (1) {
                        pos = READ_ONCE(iter->position);
                        if (!pos || css_tryget(&pos->css))
                                break;
                        /*
                         * css reference reached zero, so iter->position will
                         * be cleared by ->css_released. However, we should not
                         * rely on this happening soon, because ->css_released
                         * is called from a work queue, and by busy-waiting we
                         * might block it. So we clear iter->position right
                         * away.
                         */
                        (void)cmpxchg(&iter->position, pos, NULL);
                }
        }

        if (pos)
                css = &pos->css;

        for (;;) {
                css = css_next_descendant_pre(css, &root->css);
                if (!css) {
                        /*
                         * Reclaimers share the hierarchy walk, and a
                         * new one might jump in right at the end of
                         * the hierarchy - make sure they see at least
                         * one group and restart from the beginning.
                         */
                        if (!prev)
                                continue;
                        break;
                }

                /*
                 * Verify the css and acquire a reference. The root
                 * is provided by the caller, so we know it's alive
                 * and kicking, and don't take an extra reference.
                 */
                memcg = mem_cgroup_from_css(css);

                if (css == &root->css)
                        break;

                if (css_tryget(css))
                        break;

                memcg = NULL;
        }

        if (reclaim) {
                /*
                 * The position could have already been updated by a competing
                 * thread, so check that the value hasn't changed since we read
                 * it to avoid reclaiming from the same cgroup twice.
                 */
                (void)cmpxchg(&iter->position, pos, memcg);

                if (pos)
                        css_put(&pos->css);

                if (!memcg)
                        iter->generation++;
                else if (!prev)
                        reclaim->generation = iter->generation;
        }

out_unlock:
        rcu_read_unlock();
        if (prev && prev != root)
                css_put(&prev->css);

        return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
                           struct mem_cgroup *prev)
{
        if (!root)
                root = root_mem_cgroup;
        if (prev && prev != root)
                css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
                                           struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_node *mz;
        int nid;

        for_each_node(nid) {
                mz = mem_cgroup_nodeinfo(from, nid);
                iter = &mz->iter;
                cmpxchg(&iter->position, dead_memcg, NULL);
        }
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
        struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup *last;

        do {
                __invalidate_reclaim_iterators(memcg, dead_memcg);
                last = memcg;
        } while ((memcg = parent_mem_cgroup(memcg)));

        /*
1281 * When cgroup1 non-hierarchy mode is used,
1282 * parent_mem_cgroup() does not walk all the way up to the
1283 * cgroup root (root_mem_cgroup). So we have to handle
1284 * dead_memcg from cgroup root separately.
1285 */
1286 if (last != root_mem_cgroup)
1287 __invalidate_reclaim_iterators(root_mem_cgroup,
1288 dead_memcg);
1289}
1290
Johannes Weiner925b7672012-01-12 17:18:15 -08001291/**
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001292 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1293 * @memcg: hierarchy root
1294 * @fn: function to call for each task
1295 * @arg: argument passed to @fn
1296 *
1297 * This function iterates over tasks attached to @memcg or to any of its
1298 * descendants and calls @fn for each task. If @fn returns a non-zero
1299 * value, the function breaks the iteration loop and returns the value.
1300 * Otherwise, it will iterate over all tasks and return 0.
1301 *
1302 * This function must not be called for the root memory cgroup.
1303 */
1304int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1305 int (*fn)(struct task_struct *, void *), void *arg)
1306{
1307 struct mem_cgroup *iter;
1308 int ret = 0;
1309
1310 BUG_ON(memcg == root_mem_cgroup);
1311
1312 for_each_mem_cgroup_tree(iter, memcg) {
1313 struct css_task_iter it;
1314 struct task_struct *task;
1315
Tetsuo Handaf168a9a2019-07-11 21:00:20 -07001316 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001317 while (!ret && (task = css_task_iter_next(&it)))
1318 ret = fn(task, arg);
1319 css_task_iter_end(&it);
1320 if (ret) {
1321 mem_cgroup_iter_break(memcg, iter);
1322 break;
1323 }
1324 }
1325 return ret;
1326}
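
/*
 * Illustrative sketch (not part of the original file): counting the tasks
 * attached to a memcg hierarchy with mem_cgroup_scan_tasks().  Returning 0
 * from the callback keeps the iteration going; a non-zero return would stop
 * it.  As noted above, this must not be used on the root memory cgroup.
 */
#if 0
static int example_count_task(struct task_struct *task, void *arg)
{
        (*(unsigned int *)arg)++;
        return 0;
}

static unsigned int example_count_tasks(struct mem_cgroup *memcg)
{
        unsigned int nr = 0;

        mem_cgroup_scan_tasks(memcg, example_count_task, &nr);
        return nr;
}
#endif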
1327
Alex Shi6168d0d2020-12-15 12:34:29 -08001328#ifdef CONFIG_DEBUG_VM
1329void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
1330{
1331 struct mem_cgroup *memcg;
1332
1333 if (mem_cgroup_disabled())
1334 return;
1335
1336 memcg = page_memcg(page);
1337
1338 if (!memcg)
1339 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
1340 else
1341 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
1342}
1343#endif
1344
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001345/**
Alex Shi6168d0d2020-12-15 12:34:29 -08001346 * lock_page_lruvec - lock and return lruvec for a given page.
1347 * @page: the page
1348 *
1349 * These functions should only be used when one of the following holds:
1350 * the page's PageLRU flag is cleared or not yet set,
1351 * or page->_refcount is zero,
1352 * or the page is locked.
1353 */
1354struct lruvec *lock_page_lruvec(struct page *page)
1355{
1356 struct lruvec *lruvec;
1357 struct pglist_data *pgdat = page_pgdat(page);
1358
1359 rcu_read_lock();
1360 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1361 spin_lock(&lruvec->lru_lock);
1362 rcu_read_unlock();
1363
1364 lruvec_memcg_debug(lruvec, page);
1365
1366 return lruvec;
1367}
1368
1369struct lruvec *lock_page_lruvec_irq(struct page *page)
1370{
1371 struct lruvec *lruvec;
1372 struct pglist_data *pgdat = page_pgdat(page);
1373
1374 rcu_read_lock();
1375 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1376 spin_lock_irq(&lruvec->lru_lock);
1377 rcu_read_unlock();
1378
1379 lruvec_memcg_debug(lruvec, page);
1380
1381 return lruvec;
1382}
1383
1384struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
1385{
1386 struct lruvec *lruvec;
1387 struct pglist_data *pgdat = page_pgdat(page);
1388
1389 rcu_read_lock();
1390 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1391 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1392 rcu_read_unlock();
1393
1394 lruvec_memcg_debug(lruvec, page);
1395
1396 return lruvec;
1397}
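
/*
 * Illustrative sketch (not part of the original file): a caller that meets
 * one of the conditions listed above (here, an isolated page) takes the
 * lruvec lock and releases it directly on lruvec->lru_lock when done.
 */
#if 0
static void example_touch_lru_state(struct page *page)
{
        struct lruvec *lruvec;
        unsigned long flags;

        lruvec = lock_page_lruvec_irqsave(page, &flags);
        /* ... manipulate the page's LRU state here ... */
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
#endif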
1398
1399/**
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001400 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1401 * @lruvec: mem_cgroup per zone lru vector
1402 * @lru: index of lru list the page is sitting on
Michal Hockob4536f0c82017-01-10 16:58:04 -08001403 * @zid: zone id of the accounted pages
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001404 * @nr_pages: positive when adding or negative when removing
Johannes Weiner925b7672012-01-12 17:18:15 -08001405 *
Hugh Dickinsca707232016-05-19 17:12:35 -07001406 * This function must be called under lru_lock, just before a page is added
1407 * to or just after a page is removed from an lru list (that ordering being
1408 * so as to allow it to check that lru_size 0 is consistent with list_empty).
Johannes Weiner925b7672012-01-12 17:18:15 -08001409 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001410void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
Michal Hockob4536f0c82017-01-10 16:58:04 -08001411 int zid, int nr_pages)
Johannes Weiner925b7672012-01-12 17:18:15 -08001412{
Mel Gormanef8f2322016-07-28 15:46:05 -07001413 struct mem_cgroup_per_node *mz;
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001414 unsigned long *lru_size;
Hugh Dickinsca707232016-05-19 17:12:35 -07001415 long size;
Johannes Weiner925b7672012-01-12 17:18:15 -08001416
1417 if (mem_cgroup_disabled())
1418 return;
1419
Mel Gormanef8f2322016-07-28 15:46:05 -07001420 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
Michal Hockob4536f0c82017-01-10 16:58:04 -08001421 lru_size = &mz->lru_zone_size[zid][lru];
Hugh Dickinsca707232016-05-19 17:12:35 -07001422
1423 if (nr_pages < 0)
1424 *lru_size += nr_pages;
1425
1426 size = *lru_size;
Michal Hockob4536f0c82017-01-10 16:58:04 -08001427 if (WARN_ONCE(size < 0,
1428 "%s(%p, %d, %d): lru_size %ld\n",
1429 __func__, lruvec, lru, nr_pages, size)) {
Hugh Dickinsca707232016-05-19 17:12:35 -07001430 VM_BUG_ON(1);
1431 *lru_size = 0;
1432 }
1433
1434 if (nr_pages > 0)
1435 *lru_size += nr_pages;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001436}
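
/*
 * Illustrative sketch (not part of the original file) of the ordering the
 * comment above asks for: grow lru_size before linking the page so that a
 * zero size always implies an empty list (removal would do the reverse).
 * This is a simplified stand-in for what update_lru_size() and
 * add_page_to_lru_list() do elsewhere in mm.
 */
#if 0
static void example_add_page_to_lru(struct lruvec *lruvec, struct page *page,
                                    enum lru_list lru)
{
        mem_cgroup_update_lru_size(lruvec, lru, page_zonenum(page),
                                   thp_nr_pages(page));
        list_add(&page->lru, &lruvec->lists[lru]);
}
#endif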
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001437
Johannes Weiner19942822011-02-01 15:52:43 -08001438/**
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001439 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
Wanpeng Lidad75572012-06-20 12:53:01 -07001440 * @memcg: the memory cgroup
Johannes Weiner19942822011-02-01 15:52:43 -08001441 *
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001442 * Returns the maximum amount of memory @memcg can be charged with, in
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001443 * pages.
Johannes Weiner19942822011-02-01 15:52:43 -08001444 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001445static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
Johannes Weiner19942822011-02-01 15:52:43 -08001446{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001447 unsigned long margin = 0;
1448 unsigned long count;
1449 unsigned long limit;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001450
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001451 count = page_counter_read(&memcg->memory);
Roman Gushchinbbec2e12018-06-07 17:06:18 -07001452 limit = READ_ONCE(memcg->memory.max);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001453 if (count < limit)
1454 margin = limit - count;
1455
Johannes Weiner7941d212016-01-14 15:21:23 -08001456 if (do_memsw_account()) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001457 count = page_counter_read(&memcg->memsw);
Roman Gushchinbbec2e12018-06-07 17:06:18 -07001458 limit = READ_ONCE(memcg->memsw.max);
Kaixu Xia1c4448e2020-06-01 21:49:36 -07001459 if (count < limit)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001460 margin = min(margin, limit - count);
Li RongQingcbedbac2016-05-27 14:27:43 -07001461 else
1462 margin = 0;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001463 }
1464
1465 return margin;
Johannes Weiner19942822011-02-01 15:52:43 -08001466}
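
/*
 * Worked example (illustrative numbers): with memory.max of 1000 pages and
 * a usage of 900 pages the margin is 100 pages.  If memsw is accounted too,
 * with memsw.max of 950 pages and a memsw usage of 920 pages, the margin
 * shrinks to min(100, 30) = 30 pages.
 */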
1467
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001468/*
Qiang Huangbdcbb652014-06-04 16:08:21 -07001469 * A routine for checking whether "mem" is under move_account() or not.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001470 *
Qiang Huangbdcbb652014-06-04 16:08:21 -07001471 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1472 * cgroups being moved. This is used for waiting out the high memory
1473 * pressure caused by a "move".
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001474 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001475static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001476{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001477 struct mem_cgroup *from;
1478 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001479 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001480 /*
1481 * Unlike task_move routines, we access mc.to, mc.from not under
1482 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1483 */
1484 spin_lock(&mc.lock);
1485 from = mc.from;
1486 to = mc.to;
1487 if (!from)
1488 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001489
Johannes Weiner2314b422014-12-10 15:44:33 -08001490 ret = mem_cgroup_is_descendant(from, memcg) ||
1491 mem_cgroup_is_descendant(to, memcg);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001492unlock:
1493 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001494 return ret;
1495}
1496
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001497static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001498{
1499 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001500 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001501 DEFINE_WAIT(wait);
1502 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1503 /* moving charge context might have finished. */
1504 if (mc.moving_task)
1505 schedule();
1506 finish_wait(&mc.waitq, &wait);
1507 return true;
1508 }
1509 }
1510 return false;
1511}
1512
Muchun Song5f9a4f42020-10-13 16:52:59 -07001513struct memory_stat {
1514 const char *name;
1515 unsigned int ratio;
1516 unsigned int idx;
1517};
1518
1519static struct memory_stat memory_stats[] = {
1520 { "anon", PAGE_SIZE, NR_ANON_MAPPED },
1521 { "file", PAGE_SIZE, NR_FILE_PAGES },
1522 { "kernel_stack", 1024, NR_KERNEL_STACK_KB },
Shakeel Buttf0c0c112020-12-14 19:07:17 -08001523 { "pagetables", PAGE_SIZE, NR_PAGETABLE },
Muchun Song5f9a4f42020-10-13 16:52:59 -07001524 { "percpu", 1, MEMCG_PERCPU_B },
1525 { "sock", PAGE_SIZE, MEMCG_SOCK },
1526 { "shmem", PAGE_SIZE, NR_SHMEM },
1527 { "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
1528 { "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
1529 { "file_writeback", PAGE_SIZE, NR_WRITEBACK },
1530#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1531 /*
1532 * The ratio will be initialized in memory_stats_init(), because
1533 * on some architectures the HPAGE_PMD_SIZE macro is not a
1534 * compile-time constant (e.g. powerpc).
1535 */
1536 { "anon_thp", 0, NR_ANON_THPS },
Johannes Weinerb8eddff2020-12-14 19:06:20 -08001537 { "file_thp", 0, NR_FILE_THPS },
1538 { "shmem_thp", 0, NR_SHMEM_THPS },
Muchun Song5f9a4f42020-10-13 16:52:59 -07001539#endif
1540 { "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
1541 { "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
1542 { "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
1543 { "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
1544 { "unevictable", PAGE_SIZE, NR_UNEVICTABLE },
1545
1546 /*
1547 * Note: slab_reclaimable and slab_unreclaimable must be adjacent,
1548 * with slab_reclaimable first.
1549 */
1550 { "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
1551 { "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },
1552
1553 /* The memory events */
1554 { "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
1555 { "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
1556 { "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
1557 { "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
1558 { "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
1559 { "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
1560 { "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
1561};
1562
1563static int __init memory_stats_init(void)
1564{
1565 int i;
1566
1567 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1568#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Johannes Weinerb8eddff2020-12-14 19:06:20 -08001569 if (memory_stats[i].idx == NR_ANON_THPS ||
1570 memory_stats[i].idx == NR_FILE_THPS ||
1571 memory_stats[i].idx == NR_SHMEM_THPS)
Muchun Song5f9a4f42020-10-13 16:52:59 -07001572 memory_stats[i].ratio = HPAGE_PMD_SIZE;
1573#endif
1574 VM_BUG_ON(!memory_stats[i].ratio);
1575 VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
1576 }
1577
1578 return 0;
1579}
1580pure_initcall(memory_stats_init);
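
/*
 * Worked example (illustrative): each raw counter is multiplied by its
 * ratio to report bytes.  NR_KERNEL_STACK_KB is kept in kilobytes, so a
 * count of 64 is shown as 64 * 1024 = 65536 bytes, while the NR_SLAB_*_B
 * counters already carry bytes and therefore use a ratio of 1.
 */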
1581
Johannes Weinerc8713d02019-07-11 20:55:59 -07001582static char *memory_stat_format(struct mem_cgroup *memcg)
1583{
1584 struct seq_buf s;
1585 int i;
Johannes Weiner71cd3112017-05-03 14:55:13 -07001586
Johannes Weinerc8713d02019-07-11 20:55:59 -07001587 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1588 if (!s.buffer)
1589 return NULL;
1590
1591 /*
1592 * Provide statistics on the state of the memory subsystem as
1593 * well as cumulative event counters that show past behavior.
1594 *
1595 * This list is ordered following a combination of these gradients:
1596 * 1) generic big picture -> specifics and details
1597 * 2) reflecting userspace activity -> reflecting kernel heuristics
1598 *
1599 * Current memory state:
1600 */
1601
Muchun Song5f9a4f42020-10-13 16:52:59 -07001602 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1603 u64 size;
Johannes Weinerc8713d02019-07-11 20:55:59 -07001604
Muchun Song5f9a4f42020-10-13 16:52:59 -07001605 size = memcg_page_state(memcg, memory_stats[i].idx);
1606 size *= memory_stats[i].ratio;
1607 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
Johannes Weinerc8713d02019-07-11 20:55:59 -07001608
Muchun Song5f9a4f42020-10-13 16:52:59 -07001609 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1610 size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1611 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
1612 seq_buf_printf(&s, "slab %llu\n", size);
1613 }
1614 }
Johannes Weinerc8713d02019-07-11 20:55:59 -07001615
1616 /* Accumulated memory events */
1617
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08001618 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1619 memcg_events(memcg, PGFAULT));
1620 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1621 memcg_events(memcg, PGMAJFAULT));
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08001622 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
1623 memcg_events(memcg, PGREFILL));
Johannes Weinerc8713d02019-07-11 20:55:59 -07001624 seq_buf_printf(&s, "pgscan %lu\n",
1625 memcg_events(memcg, PGSCAN_KSWAPD) +
1626 memcg_events(memcg, PGSCAN_DIRECT));
1627 seq_buf_printf(&s, "pgsteal %lu\n",
1628 memcg_events(memcg, PGSTEAL_KSWAPD) +
1629 memcg_events(memcg, PGSTEAL_DIRECT));
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08001630 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1631 memcg_events(memcg, PGACTIVATE));
1632 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1633 memcg_events(memcg, PGDEACTIVATE));
1634 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1635 memcg_events(memcg, PGLAZYFREE));
1636 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1637 memcg_events(memcg, PGLAZYFREED));
Johannes Weinerc8713d02019-07-11 20:55:59 -07001638
1639#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08001640 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
Johannes Weinerc8713d02019-07-11 20:55:59 -07001641 memcg_events(memcg, THP_FAULT_ALLOC));
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08001642 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
Johannes Weinerc8713d02019-07-11 20:55:59 -07001643 memcg_events(memcg, THP_COLLAPSE_ALLOC));
1644#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1645
1646 /* The above should easily fit into one page */
1647 WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1648
1649 return s.buffer;
1650}
Johannes Weiner71cd3112017-05-03 14:55:13 -07001651
Sha Zhengju58cf1882013-02-22 16:32:05 -08001652#define K(x) ((x) << (PAGE_SHIFT-10))
Balbir Singhe2224322009-04-02 16:57:39 -07001653/**
yuzhoujianf0c867d2018-12-28 00:36:10 -08001654 * mem_cgroup_print_oom_context: Print OOM information relevant to
1655 * memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001656 * @memcg: The memory cgroup that went over limit
1657 * @p: Task that is going to be killed
1658 *
1659 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1660 * enabled
1661 */
yuzhoujianf0c867d2018-12-28 00:36:10 -08001662void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1663{
1664 rcu_read_lock();
1665
1666 if (memcg) {
1667 pr_cont(",oom_memcg=");
1668 pr_cont_cgroup_path(memcg->css.cgroup);
1669 } else
1670 pr_cont(",global_oom");
1671 if (p) {
1672 pr_cont(",task_memcg=");
1673 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1674 }
1675 rcu_read_unlock();
1676}
1677
1678/**
1679 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1680 * memory controller.
1681 * @memcg: The memory cgroup that went over limit
1682 */
1683void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
Balbir Singhe2224322009-04-02 16:57:39 -07001684{
Johannes Weinerc8713d02019-07-11 20:55:59 -07001685 char *buf;
Balbir Singhe2224322009-04-02 16:57:39 -07001686
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001687 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1688 K((u64)page_counter_read(&memcg->memory)),
Chris Down15b42562020-04-01 21:07:20 -07001689 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
Johannes Weinerc8713d02019-07-11 20:55:59 -07001690 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1691 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1692 K((u64)page_counter_read(&memcg->swap)),
Chris Down32d087c2020-04-01 21:07:30 -07001693 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
Johannes Weinerc8713d02019-07-11 20:55:59 -07001694 else {
1695 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1696 K((u64)page_counter_read(&memcg->memsw)),
1697 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1698 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1699 K((u64)page_counter_read(&memcg->kmem)),
1700 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001701 }
Johannes Weinerc8713d02019-07-11 20:55:59 -07001702
1703 pr_info("Memory cgroup stats for ");
1704 pr_cont_cgroup_path(memcg->css.cgroup);
1705 pr_cont(":");
1706 buf = memory_stat_format(memcg);
1707 if (!buf)
1708 return;
1709 pr_info("%s", buf);
1710 kfree(buf);
Balbir Singhe2224322009-04-02 16:57:39 -07001711}
1712
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001713/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001714 * Return the memory (and swap, if configured) limit for a memcg.
1715 */
Roman Gushchinbbec2e12018-06-07 17:06:18 -07001716unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001717{
Waiman Long8d387a52020-10-13 16:52:52 -07001718 unsigned long max = READ_ONCE(memcg->memory.max);
David Rientjesa63d83f2010-08-09 17:19:46 -07001719
Waiman Long8d387a52020-10-13 16:52:52 -07001720 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1721 if (mem_cgroup_swappiness(memcg))
1722 max += min(READ_ONCE(memcg->swap.max),
1723 (unsigned long)total_swap_pages);
1724 } else { /* v1 */
1725 if (mem_cgroup_swappiness(memcg)) {
1726 /* Calculate swap excess capacity from memsw limit */
1727 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001728
Waiman Long8d387a52020-10-13 16:52:52 -07001729 max += min(swap, (unsigned long)total_swap_pages);
1730 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001731 }
Roman Gushchinbbec2e12018-06-07 17:06:18 -07001732 return max;
David Rientjesa63d83f2010-08-09 17:19:46 -07001733}
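
/*
 * Worked example (illustrative numbers): on cgroup2 with memory.max of
 * 1000 pages, swap.max of 500 pages and 800 pages of swap present, the
 * result is 1000 + min(500, 800) = 1500 pages.  On cgroup1 the swap
 * headroom is memsw.max - memory.max instead, clamped to total_swap_pages.
 */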
1734
Chris Down9783aa92019-10-06 17:58:32 -07001735unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1736{
1737 return page_counter_read(&memcg->memory);
1738}
1739
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07001740static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
David Rientjes19965462012-12-11 16:00:26 -08001741 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001742{
David Rientjes6e0fc462015-09-08 15:00:36 -07001743 struct oom_control oc = {
1744 .zonelist = NULL,
1745 .nodemask = NULL,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07001746 .memcg = memcg,
David Rientjes6e0fc462015-09-08 15:00:36 -07001747 .gfp_mask = gfp_mask,
1748 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07001749 };
Yafang Shao1378b372020-08-06 23:22:08 -07001750 bool ret = true;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001751
Tetsuo Handa7775fac2019-03-05 15:46:47 -08001752 if (mutex_lock_killable(&oom_lock))
1753 return true;
Yafang Shao1378b372020-08-06 23:22:08 -07001754
1755 if (mem_cgroup_margin(memcg) >= (1 << order))
1756 goto unlock;
1757
Tetsuo Handa7775fac2019-03-05 15:46:47 -08001758 /*
1759 * A few threads which were not waiting at mutex_lock_killable() can
1760 * fail to bail out. Therefore, check again after holding oom_lock.
1761 */
1762 ret = should_force_charge() || out_of_memory(&oc);
Yafang Shao1378b372020-08-06 23:22:08 -07001763
1764unlock:
Johannes Weinerdc564012015-06-24 16:57:19 -07001765 mutex_unlock(&oom_lock);
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001766 return ret;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001767}
1768
Andrew Morton0608f432013-09-24 15:27:41 -07001769static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
Mel Gormanef8f2322016-07-28 15:46:05 -07001770 pg_data_t *pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07001771 gfp_t gfp_mask,
1772 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001773{
Andrew Morton0608f432013-09-24 15:27:41 -07001774 struct mem_cgroup *victim = NULL;
1775 int total = 0;
1776 int loop = 0;
1777 unsigned long excess;
1778 unsigned long nr_scanned;
1779 struct mem_cgroup_reclaim_cookie reclaim = {
Mel Gormanef8f2322016-07-28 15:46:05 -07001780 .pgdat = pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07001781 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001782
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001783 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001784
Andrew Morton0608f432013-09-24 15:27:41 -07001785 while (1) {
1786 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1787 if (!victim) {
1788 loop++;
1789 if (loop >= 2) {
1790 /*
1791 * If we have not been able to reclaim
1792 * anything, it might be because there are
1793 * no reclaimable pages under this hierarchy.
1794 */
1795 if (!total)
1796 break;
1797 /*
1798 * We want to do more targeted reclaim.
1799 * excess >> 2 is not too large, so we do not
1800 * reclaim too much, nor too small, so we do not keep
1801 * coming back to reclaim from this cgroup.
1802 */
1803 if (total >= (excess >> 2) ||
1804 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1805 break;
1806 }
1807 continue;
1808 }
Mel Gormana9dd0a82016-07-28 15:46:02 -07001809 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
Mel Gormanef8f2322016-07-28 15:46:05 -07001810 pgdat, &nr_scanned);
Andrew Morton0608f432013-09-24 15:27:41 -07001811 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001812 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001813 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001814 }
Andrew Morton0608f432013-09-24 15:27:41 -07001815 mem_cgroup_iter_break(root_memcg, victim);
1816 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001817}
1818
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001819#ifdef CONFIG_LOCKDEP
1820static struct lockdep_map memcg_oom_lock_dep_map = {
1821 .name = "memcg_oom_lock",
1822};
1823#endif
1824
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001825static DEFINE_SPINLOCK(memcg_oom_lock);
1826
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001827/*
1828 * Check OOM-Killer is already running under our hierarchy.
1829 * If someone is running, return false.
1830 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001831static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001832{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001833 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001834
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001835 spin_lock(&memcg_oom_lock);
1836
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001837 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001838 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001839 /*
1840 * This subtree of our hierarchy is already locked,
1841 * so we cannot take the lock.
1842 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001843 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001844 mem_cgroup_iter_break(memcg, iter);
1845 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001846 } else
1847 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001848 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001849
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001850 if (failed) {
1851 /*
1852 * OK, we failed to lock the whole subtree so we have
1853 * to clean up what we have set up, up to the failing cgroup.
1854 */
1855 for_each_mem_cgroup_tree(iter, memcg) {
1856 if (iter == failed) {
1857 mem_cgroup_iter_break(memcg, iter);
1858 break;
1859 }
1860 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001861 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001862 } else
1863 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001864
1865 spin_unlock(&memcg_oom_lock);
1866
1867 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001868}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001869
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001870static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001871{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001872 struct mem_cgroup *iter;
1873
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001874 spin_lock(&memcg_oom_lock);
Qian Cai5facae42019-09-19 12:09:40 -04001875 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001876 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001877 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001878 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001879}
1880
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001881static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001882{
1883 struct mem_cgroup *iter;
1884
Tejun Heoc2b42d32015-06-24 16:58:23 -07001885 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001886 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001887 iter->under_oom++;
1888 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001889}
1890
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001891static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001892{
1893 struct mem_cgroup *iter;
1894
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001895 /*
Miaohe Lin7a52d4d82020-10-13 16:53:05 -07001896 * Be careful about under_oom underflows because a child memcg
1897 * could have been added after mem_cgroup_mark_under_oom.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001898 */
Tejun Heoc2b42d32015-06-24 16:58:23 -07001899 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001900 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001901 if (iter->under_oom > 0)
1902 iter->under_oom--;
1903 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001904}
1905
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001906static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1907
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001908struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001909 struct mem_cgroup *memcg;
Ingo Molnarac6424b2017-06-20 12:06:13 +02001910 wait_queue_entry_t wait;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001911};
1912
Ingo Molnarac6424b2017-06-20 12:06:13 +02001913static int memcg_oom_wake_function(wait_queue_entry_t *wait,
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001914 unsigned mode, int sync, void *arg)
1915{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001916 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1917 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001918 struct oom_wait_info *oom_wait_info;
1919
1920 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001921 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001922
Johannes Weiner2314b422014-12-10 15:44:33 -08001923 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1924 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001925 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001926 return autoremove_wake_function(wait, mode, sync, arg);
1927}
1928
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001929static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001930{
Tejun Heoc2b42d32015-06-24 16:58:23 -07001931 /*
1932 * For the following lockless ->under_oom test, the only required
1933 * guarantee is that it must see the state asserted by an OOM when
1934 * this function is called as a result of userland actions
1935 * triggered by the notification of the OOM. This is trivially
1936 * achieved by invoking mem_cgroup_mark_under_oom() before
1937 * triggering notification.
1938 */
1939 if (memcg && memcg->under_oom)
Tejun Heof4b90b702015-06-24 16:58:21 -07001940 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001941}
1942
Michal Hocko29ef6802018-08-17 15:47:11 -07001943enum oom_status {
1944 OOM_SUCCESS,
1945 OOM_FAILED,
1946 OOM_ASYNC,
1947 OOM_SKIPPED
1948};
1949
1950static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001951{
Michal Hocko7056d3a2018-12-28 00:39:57 -08001952 enum oom_status ret;
1953 bool locked;
1954
Michal Hocko29ef6802018-08-17 15:47:11 -07001955 if (order > PAGE_ALLOC_COSTLY_ORDER)
1956 return OOM_SKIPPED;
1957
Roman Gushchin7a1adfd2018-10-26 15:09:48 -07001958 memcg_memory_event(memcg, MEMCG_OOM);
1959
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001960 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001961 * We are in the middle of the charge context here, so we
1962 * don't want to block when potentially sitting on a callstack
1963 * that holds all kinds of filesystem and mm locks.
1964 *
Michal Hocko29ef6802018-08-17 15:47:11 -07001965 * cgroup1 allows disabling the OOM killer and waiting for outside
1966 * handling until the charge can succeed; remember the context and put
1967 * the task to sleep at the end of the page fault when all locks are
1968 * released.
Johannes Weiner49426422013-10-16 13:46:59 -07001969 *
Michal Hocko29ef6802018-08-17 15:47:11 -07001970 * On the other hand, in-kernel OOM killer allows for an async victim
1971 * memory reclaim (oom_reaper) and that means that we are not solely
1972 * relying on the oom victim to make a forward progress and we can
1973 * invoke the oom killer here.
1974 *
1975 * Please note that mem_cgroup_out_of_memory might fail to find a
1976 * victim and then we have to bail out from the charge path.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001977 */
Michal Hocko29ef6802018-08-17 15:47:11 -07001978 if (memcg->oom_kill_disable) {
1979 if (!current->in_user_fault)
1980 return OOM_SKIPPED;
1981 css_get(&memcg->css);
1982 current->memcg_in_oom = memcg;
1983 current->memcg_oom_gfp_mask = mask;
1984 current->memcg_oom_order = order;
1985
1986 return OOM_ASYNC;
1987 }
1988
Michal Hocko7056d3a2018-12-28 00:39:57 -08001989 mem_cgroup_mark_under_oom(memcg);
Michal Hocko29ef6802018-08-17 15:47:11 -07001990
Michal Hocko7056d3a2018-12-28 00:39:57 -08001991 locked = mem_cgroup_oom_trylock(memcg);
1992
1993 if (locked)
1994 mem_cgroup_oom_notify(memcg);
1995
1996 mem_cgroup_unmark_under_oom(memcg);
1997 if (mem_cgroup_out_of_memory(memcg, mask, order))
1998 ret = OOM_SUCCESS;
1999 else
2000 ret = OOM_FAILED;
2001
2002 if (locked)
2003 mem_cgroup_oom_unlock(memcg);
2004
2005 return ret;
Johannes Weiner49426422013-10-16 13:46:59 -07002006}
2007
2008/**
2009 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2010 * @handle: actually kill/wait or just clean up the OOM state
2011 *
2012 * This has to be called at the end of a page fault if the memcg OOM
2013 * handler was enabled.
2014 *
2015 * Memcg supports userspace OOM handling where failed allocations must
2016 * sleep on a waitqueue until the userspace task resolves the
2017 * situation. Sleeping directly in the charge context with all kinds
2018 * of locks held is not a good idea, instead we remember an OOM state
2019 * in the task and mem_cgroup_oom_synchronize() has to be called at
2020 * the end of the page fault to complete the OOM handling.
2021 *
2022 * Returns %true if an ongoing memcg OOM situation was detected and
2023 * completed, %false otherwise.
2024 */
2025bool mem_cgroup_oom_synchronize(bool handle)
2026{
Tejun Heo626ebc42015-11-05 18:46:09 -08002027 struct mem_cgroup *memcg = current->memcg_in_oom;
Johannes Weiner49426422013-10-16 13:46:59 -07002028 struct oom_wait_info owait;
2029 bool locked;
2030
2031 /* OOM is global, do not handle */
2032 if (!memcg)
2033 return false;
2034
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07002035 if (!handle)
Johannes Weiner49426422013-10-16 13:46:59 -07002036 goto cleanup;
2037
2038 owait.memcg = memcg;
2039 owait.wait.flags = 0;
2040 owait.wait.func = memcg_oom_wake_function;
2041 owait.wait.private = current;
Ingo Molnar2055da92017-06-20 12:06:46 +02002042 INIT_LIST_HEAD(&owait.wait.entry);
Johannes Weiner49426422013-10-16 13:46:59 -07002043
2044 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002045 mem_cgroup_mark_under_oom(memcg);
2046
2047 locked = mem_cgroup_oom_trylock(memcg);
2048
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002049 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002050 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002051
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002052 if (locked && !memcg->oom_kill_disable) {
2053 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07002054 finish_wait(&memcg_oom_waitq, &owait.wait);
Tejun Heo626ebc42015-11-05 18:46:09 -08002055 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2056 current->memcg_oom_order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002057 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002058 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07002059 mem_cgroup_unmark_under_oom(memcg);
2060 finish_wait(&memcg_oom_waitq, &owait.wait);
2061 }
2062
2063 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002064 mem_cgroup_oom_unlock(memcg);
2065 /*
2066 * There is no guarantee that an OOM-lock contender
2067 * sees the wakeups triggered by the OOM kill
2068 * uncharges. Wake any sleepers explicitly.
2069 */
2070 memcg_oom_recover(memcg);
2071 }
Johannes Weiner49426422013-10-16 13:46:59 -07002072cleanup:
Tejun Heo626ebc42015-11-05 18:46:09 -08002073 current->memcg_in_oom = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002074 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002075 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07002076}
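
/*
 * Illustrative sketch (not part of the original file): how the tail end of
 * a page fault could complete a userspace-handled memcg OOM.  The policy
 * on retrying the fault afterwards is up to the (assumed) caller.
 */
#if 0
static void example_pagefault_epilogue(void)
{
        /* current->memcg_in_oom was set by mem_cgroup_oom() while charging */
        if (mem_cgroup_oom_synchronize(true))
                return;         /* the memcg OOM was handled, retry the fault */

        /* otherwise fall back to the global OOM handling */
}
#endif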
2077
Johannes Weinerd7365e72014-10-29 14:50:48 -07002078/**
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07002079 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2080 * @victim: task to be killed by the OOM killer
2081 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2082 *
2083 * Returns a pointer to a memory cgroup, which has to be cleaned up
2084 * by killing all belonging OOM-killable tasks.
2085 *
2086 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2087 */
2088struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2089 struct mem_cgroup *oom_domain)
2090{
2091 struct mem_cgroup *oom_group = NULL;
2092 struct mem_cgroup *memcg;
2093
2094 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2095 return NULL;
2096
2097 if (!oom_domain)
2098 oom_domain = root_mem_cgroup;
2099
2100 rcu_read_lock();
2101
2102 memcg = mem_cgroup_from_task(victim);
2103 if (memcg == root_mem_cgroup)
2104 goto out;
2105
2106 /*
Roman Gushchin48fe2672020-04-01 21:07:39 -07002107 * If the victim task has been asynchronously moved to a different
2108 * memory cgroup, we might end up killing tasks outside oom_domain.
2109 * In this case it's better to ignore memory.oom.group.
2110 */
2111 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2112 goto out;
2113
2114 /*
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07002115 * Traverse the memory cgroup hierarchy from the victim task's
2116 * cgroup up to the OOMing cgroup (or root) to find the
2117 * highest-level memory cgroup with oom.group set.
2118 */
2119 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2120 if (memcg->oom_group)
2121 oom_group = memcg;
2122
2123 if (memcg == oom_domain)
2124 break;
2125 }
2126
2127 if (oom_group)
2128 css_get(&oom_group->css);
2129out:
2130 rcu_read_unlock();
2131
2132 return oom_group;
2133}
2134
2135void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2136{
2137 pr_info("Tasks in ");
2138 pr_cont_cgroup_path(memcg->css.cgroup);
2139 pr_cont(" are going to be killed due to memory.oom.group set\n");
2140}
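
/*
 * Illustrative sketch (not part of the original file): how an OOM-killing
 * path might combine mem_cgroup_get_oom_group() with mem_cgroup_scan_tasks()
 * to kill every task in the group.  example_kill_task() is an assumed
 * callback; the real consumer of this API lives in mm/oom_kill.c.
 */
#if 0
static void example_kill_oom_group(struct task_struct *victim,
                                   struct mem_cgroup *oom_domain)
{
        struct mem_cgroup *group;

        group = mem_cgroup_get_oom_group(victim, oom_domain);
        if (group) {
                mem_cgroup_print_oom_group(group);
                mem_cgroup_scan_tasks(group, example_kill_task, NULL);
                mem_cgroup_put(group);
        }
}
#endif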
2141
2142/**
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002143 * lock_page_memcg - lock a page and memcg binding
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07002144 * @page: the page
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002145 *
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07002146 * This function protects unlocked LRU pages from being moved to
Johannes Weiner739f79f2017-08-18 15:15:48 -07002147 * another cgroup.
2148 *
2149 * It ensures the lifetime of the returned memcg. The caller is responsible
2150 * for the lifetime of the page; __unlock_page_memcg() is available
2151 * when @page might get freed inside the locked section.
Balbir Singhd69b0422009-06-17 16:26:34 -07002152 */
Johannes Weiner739f79f2017-08-18 15:15:48 -07002153struct mem_cgroup *lock_page_memcg(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002154{
Johannes Weiner9da7b522020-06-03 16:01:51 -07002155 struct page *head = compound_head(page); /* rmap on tail pages */
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002156 struct mem_cgroup *memcg;
Johannes Weiner6de22612015-02-11 15:25:01 -08002157 unsigned long flags;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002158
Johannes Weiner6de22612015-02-11 15:25:01 -08002159 /*
2160 * The RCU lock is held throughout the transaction. The fast
2161 * path can get away without acquiring the memcg->move_lock
2162 * because page moving starts with an RCU grace period.
Johannes Weiner739f79f2017-08-18 15:15:48 -07002163 *
2164 * The RCU lock also protects the memcg from being freed when
2165 * the page state that is going to change is the only thing
2166 * preventing the page itself from being freed. E.g. writeback
2167 * doesn't hold a page reference and relies on PG_writeback to
2168 * keep off truncation, migration and so forth.
2169 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07002170 rcu_read_lock();
2171
2172 if (mem_cgroup_disabled())
Johannes Weiner739f79f2017-08-18 15:15:48 -07002173 return NULL;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002174again:
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002175 memcg = page_memcg(head);
Johannes Weiner29833312014-12-10 15:44:02 -08002176 if (unlikely(!memcg))
Johannes Weiner739f79f2017-08-18 15:15:48 -07002177 return NULL;
Johannes Weinerd7365e72014-10-29 14:50:48 -07002178
Alex Shi20ad50d2020-12-15 12:33:51 -08002179#ifdef CONFIG_PROVE_LOCKING
2180 local_irq_save(flags);
2181 might_lock(&memcg->move_lock);
2182 local_irq_restore(flags);
2183#endif
2184
Qiang Huangbdcbb652014-06-04 16:08:21 -07002185 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weiner739f79f2017-08-18 15:15:48 -07002186 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002187
Johannes Weiner6de22612015-02-11 15:25:01 -08002188 spin_lock_irqsave(&memcg->move_lock, flags);
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002189 if (memcg != page_memcg(head)) {
Johannes Weiner6de22612015-02-11 15:25:01 -08002190 spin_unlock_irqrestore(&memcg->move_lock, flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002191 goto again;
2192 }
Johannes Weiner6de22612015-02-11 15:25:01 -08002193
2194 /*
2195 * When charge migration first begins, we can have locked and
2196 * unlocked page stat updates happening concurrently. Track
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07002197 * the task that holds the lock for unlock_page_memcg().
Johannes Weiner6de22612015-02-11 15:25:01 -08002198 */
2199 memcg->move_lock_task = current;
2200 memcg->move_lock_flags = flags;
Johannes Weinerd7365e72014-10-29 14:50:48 -07002201
Johannes Weiner739f79f2017-08-18 15:15:48 -07002202 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002203}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07002204EXPORT_SYMBOL(lock_page_memcg);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002205
Johannes Weinerd7365e72014-10-29 14:50:48 -07002206/**
Johannes Weiner739f79f2017-08-18 15:15:48 -07002207 * __unlock_page_memcg - unlock and unpin a memcg
2208 * @memcg: the memcg
2209 *
2210 * Unlock and unpin a memcg returned by lock_page_memcg().
Johannes Weinerd7365e72014-10-29 14:50:48 -07002211 */
Johannes Weiner739f79f2017-08-18 15:15:48 -07002212void __unlock_page_memcg(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002213{
Johannes Weiner6de22612015-02-11 15:25:01 -08002214 if (memcg && memcg->move_lock_task == current) {
2215 unsigned long flags = memcg->move_lock_flags;
2216
2217 memcg->move_lock_task = NULL;
2218 memcg->move_lock_flags = 0;
2219
2220 spin_unlock_irqrestore(&memcg->move_lock, flags);
2221 }
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002222
Johannes Weinerd7365e72014-10-29 14:50:48 -07002223 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002224}
Johannes Weiner739f79f2017-08-18 15:15:48 -07002225
2226/**
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002227 * unlock_page_memcg - unlock a page and memcg binding
Johannes Weiner739f79f2017-08-18 15:15:48 -07002228 * @page: the page
2229 */
2230void unlock_page_memcg(struct page *page)
2231{
Johannes Weiner9da7b522020-06-03 16:01:51 -07002232 struct page *head = compound_head(page);
2233
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002234 __unlock_page_memcg(page_memcg(head));
Johannes Weiner739f79f2017-08-18 15:15:48 -07002235}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07002236EXPORT_SYMBOL(unlock_page_memcg);
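
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for a page state update that must not race with page->memcg changes.
 * The update in the middle is a stand-in for the caller's real work.
 */
#if 0
static void example_update_page_state(struct page *page)
{
        struct mem_cgroup *memcg;

        memcg = lock_page_memcg(page);
        /* the page's memcg binding is stable here; update stats against it */
        unlock_page_memcg(page);
}
#endif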
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002237
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002238struct memcg_stock_pcp {
2239 struct mem_cgroup *cached; /* this is never the root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002240 unsigned int nr_pages;
Roman Gushchinbf4f0592020-08-06 23:20:49 -07002241
2242#ifdef CONFIG_MEMCG_KMEM
2243 struct obj_cgroup *cached_objcg;
2244 unsigned int nr_bytes;
2245#endif
2246
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002247 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002248 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07002249#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002250};
2251static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002252static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002253
Roman Gushchinbf4f0592020-08-06 23:20:49 -07002254#ifdef CONFIG_MEMCG_KMEM
2255static void drain_obj_stock(struct memcg_stock_pcp *stock);
2256static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2257 struct mem_cgroup *root_memcg);
2258
2259#else
2260static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2261{
2262}
2263static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2264 struct mem_cgroup *root_memcg)
2265{
2266 return false;
2267}
2268#endif
2269
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002270/**
2271 * consume_stock: Try to consume stocked charge on this cpu.
2272 * @memcg: memcg to consume from.
2273 * @nr_pages: how many pages to charge.
2274 *
2275 * The charges will only happen if @memcg matches the current cpu's memcg
2276 * stock, and at least @nr_pages are available in that stock. Failure to
2277 * service an allocation will refill the stock.
2278 *
2279 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002280 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002281static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002282{
2283 struct memcg_stock_pcp *stock;
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002284 unsigned long flags;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002285 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002286
Johannes Weinera983b5e2018-01-31 16:16:45 -08002287 if (nr_pages > MEMCG_CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002288 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002289
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002290 local_irq_save(flags);
2291
2292 stock = this_cpu_ptr(&memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002293 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002294 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002295 ret = true;
2296 }
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002297
2298 local_irq_restore(flags);
2299
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002300 return ret;
2301}
2302
2303/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002304 * Returns charges cached in the per-cpu stock to the counters and resets the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002305 */
2306static void drain_stock(struct memcg_stock_pcp *stock)
2307{
2308 struct mem_cgroup *old = stock->cached;
2309
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07002310 if (!old)
2311 return;
2312
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002313 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002314 page_counter_uncharge(&old->memory, stock->nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002315 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002316 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002317 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002318 }
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07002319
2320 css_put(&old->css);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002321 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002322}
2323
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002324static void drain_local_stock(struct work_struct *dummy)
2325{
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002326 struct memcg_stock_pcp *stock;
2327 unsigned long flags;
2328
Michal Hocko72f01842017-10-03 16:14:53 -07002329 /*
2330 * The only protection from memory hotplug vs. drain_stock races is
2331 * that we always operate on the local CPU stock here with IRQs disabled.
2332 */
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002333 local_irq_save(flags);
2334
2335 stock = this_cpu_ptr(&memcg_stock);
Roman Gushchinbf4f0592020-08-06 23:20:49 -07002336 drain_obj_stock(stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002337 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002338 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002339
2340 local_irq_restore(flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002341}
2342
2343/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002344 * Cache charges (nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01002345 * They will be consumed by the consume_stock() function later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002346 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002347static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002348{
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002349 struct memcg_stock_pcp *stock;
2350 unsigned long flags;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002351
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002352 local_irq_save(flags);
2353
2354 stock = this_cpu_ptr(&memcg_stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002355 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002356 drain_stock(stock);
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07002357 css_get(&memcg->css);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002358 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002359 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002360 stock->nr_pages += nr_pages;
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002361
Johannes Weinera983b5e2018-01-31 16:16:45 -08002362 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
Roman Gushchin475d0482017-09-08 16:13:09 -07002363 drain_stock(stock);
2364
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07002365 local_irq_restore(flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002366}
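
/*
 * Illustrative sketch (not part of the original file): how a charge fast
 * path might use the per-cpu stock: try consume_stock() first, and only
 * fall back to the page counters, refilling the stock with the remainder
 * of the batch, when that fails.  This is a much simplified stand-in for
 * the real try_charge() (no reclaim, retries or memsw handling).
 */
#if 0
static int example_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
        unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
        struct page_counter *counter;

        if (consume_stock(memcg, nr_pages))
                return 0;

        if (!page_counter_try_charge(&memcg->memory, batch, &counter))
                return -ENOMEM;

        refill_stock(memcg, batch - nr_pages);
        return 0;
}
#endif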
2367
2368/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002369 * Drains all per-CPU charge caches for given root_memcg resp. subtree
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002370 * of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002371 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002372static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002373{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002374 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07002375
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002376 /* If someone's already draining, avoid running more workers. */
2377 if (!mutex_trylock(&percpu_charge_mutex))
2378 return;
Michal Hocko72f01842017-10-03 16:14:53 -07002379 /*
2380 * Notify other cpus that a system-wide "drain" is running.
2381 * We do not care about races with the cpu hotplug because cpu down
2382 * as well as workers from this path always operate on the local
2383 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2384 */
Johannes Weiner5af12d02011-08-25 15:59:07 -07002385 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002386 for_each_online_cpu(cpu) {
2387 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002388 struct mem_cgroup *memcg;
Roman Gushchine1a366b2019-09-23 15:34:58 -07002389 bool flush = false;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002390
Roman Gushchine1a366b2019-09-23 15:34:58 -07002391 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002392 memcg = stock->cached;
Roman Gushchine1a366b2019-09-23 15:34:58 -07002393 if (memcg && stock->nr_pages &&
2394 mem_cgroup_is_descendant(memcg, root_memcg))
2395 flush = true;
Roman Gushchinbf4f0592020-08-06 23:20:49 -07002396 if (obj_stock_flush_required(stock, root_memcg))
2397 flush = true;
Roman Gushchine1a366b2019-09-23 15:34:58 -07002398 rcu_read_unlock();
2399
2400 if (flush &&
2401 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
Michal Hockod1a05b62011-07-26 16:08:27 -07002402 if (cpu == curcpu)
2403 drain_local_stock(&stock->work);
2404 else
2405 schedule_work_on(cpu, &stock->work);
2406 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002407 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07002408 put_cpu();
Michal Hocko9f50fad2011-08-09 11:56:26 +02002409 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002410}
2411
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01002412static int memcg_hotplug_cpu_dead(unsigned int cpu)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002413{
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002414 struct memcg_stock_pcp *stock;
Johannes Weiner42a30032019-05-14 15:47:12 -07002415 struct mem_cgroup *memcg, *mi;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002416
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002417 stock = &per_cpu(memcg_stock, cpu);
2418 drain_stock(stock);
Johannes Weinera983b5e2018-01-31 16:16:45 -08002419
2420 for_each_mem_cgroup(memcg) {
2421 int i;
2422
2423 for (i = 0; i < MEMCG_NR_STAT; i++) {
2424 int nid;
2425 long x;
2426
Chris Down871789d2019-05-14 15:46:57 -07002427 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
Johannes Weiner815744d2019-06-13 15:55:46 -07002428 if (x)
Johannes Weiner42a30032019-05-14 15:47:12 -07002429 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2430 atomic_long_add(x, &memcg->vmstats[i]);
Johannes Weinera983b5e2018-01-31 16:16:45 -08002431
2432 if (i >= NR_VM_NODE_STAT_ITEMS)
2433 continue;
2434
2435 for_each_node(nid) {
2436 struct mem_cgroup_per_node *pn;
2437
2438 pn = mem_cgroup_nodeinfo(memcg, nid);
2439 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
Johannes Weiner815744d2019-06-13 15:55:46 -07002440 if (x)
Johannes Weiner42a30032019-05-14 15:47:12 -07002441 do {
2442 atomic_long_add(x, &pn->lruvec_stat[i]);
2443 } while ((pn = parent_nodeinfo(pn, nid)));
Johannes Weinera983b5e2018-01-31 16:16:45 -08002444 }
2445 }
2446
Johannes Weinere27be242018-04-10 16:29:45 -07002447 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
Johannes Weinera983b5e2018-01-31 16:16:45 -08002448 long x;
2449
Chris Down871789d2019-05-14 15:46:57 -07002450 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
Johannes Weiner815744d2019-06-13 15:55:46 -07002451 if (x)
Johannes Weiner42a30032019-05-14 15:47:12 -07002452 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2453 atomic_long_add(x, &memcg->vmevents[i]);
Johannes Weinera983b5e2018-01-31 16:16:45 -08002454 }
2455 }
2456
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01002457 return 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002458}
2459
Chris Downb3ff9292020-08-06 23:21:54 -07002460static unsigned long reclaim_high(struct mem_cgroup *memcg,
2461 unsigned int nr_pages,
2462 gfp_t gfp_mask)
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002463{
Chris Downb3ff9292020-08-06 23:21:54 -07002464 unsigned long nr_reclaimed = 0;
2465
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002466 do {
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002467 unsigned long pflags;
2468
Jakub Kicinskid1663a92020-06-01 21:49:49 -07002469 if (page_counter_read(&memcg->memory) <=
2470 READ_ONCE(memcg->memory.high))
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002471 continue;
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002472
Johannes Weinere27be242018-04-10 16:29:45 -07002473 memcg_memory_event(memcg, MEMCG_HIGH);
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002474
2475 psi_memstall_enter(&pflags);
Chris Downb3ff9292020-08-06 23:21:54 -07002476 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2477 gfp_mask, true);
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002478 psi_memstall_leave(&pflags);
Chris Down4bf17302020-04-06 20:03:30 -07002479 } while ((memcg = parent_mem_cgroup(memcg)) &&
2480 !mem_cgroup_is_root(memcg));
Chris Downb3ff9292020-08-06 23:21:54 -07002481
2482 return nr_reclaimed;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002483}
2484
2485static void high_work_func(struct work_struct *work)
2486{
2487 struct mem_cgroup *memcg;
2488
2489 memcg = container_of(work, struct mem_cgroup, high_work);
Johannes Weinera983b5e2018-01-31 16:16:45 -08002490 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002491}
2492
Tejun Heob23afb92015-11-05 18:46:11 -08002493/*
Chris Down0e4b01d2019-09-23 15:34:55 -07002494 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2495 * enough to cause a significant slowdown in most cases, while still
2496 * allowing diagnostics and tracing to proceed without becoming stuck.
2497 */
2498#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2499
2500/*
2501 * When calculating the delay, we use these either side of the exponentiation to
2502 * maintain precision and scale to a reasonable number of jiffies (see the table
2503 * below).
2504 *
2505 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506 * overage ratio to a delay.
Randy Dunlapac5ddd02020-08-11 18:33:02 -07002507 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
Chris Down0e4b01d2019-09-23 15:34:55 -07002508 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2509 * to produce a reasonable delay curve.
2510 *
2511 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2512 * reasonable delay curve compared to precision-adjusted overage, not
2513 * penalising heavily at first, but still making sure that growth beyond the
2514 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2515 * example, with a high of 100 megabytes:
2516 *
2517 * +-------+------------------------+
2518 * | usage | time to allocate in ms |
2519 * +-------+------------------------+
2520 * | 100M | 0 |
2521 * | 101M | 6 |
2522 * | 102M | 25 |
2523 * | 103M | 57 |
2524 * | 104M | 102 |
2525 * | 105M | 159 |
2526 * | 106M | 230 |
2527 * | 107M | 313 |
2528 * | 108M | 409 |
2529 * | 109M | 518 |
2530 * | 110M | 639 |
2531 * | 111M | 774 |
2532 * | 112M | 921 |
2533 * | 113M | 1081 |
2534 * | 114M | 1254 |
2535 * | 115M | 1439 |
2536 * | 116M | 1638 |
2537 * | 117M | 1849 |
2538 * | 118M | 2000 |
2539 * | 119M | 2000 |
2540 * | 120M | 2000 |
2541 * +-------+------------------------+
2542 */
2543#define MEMCG_DELAY_PRECISION_SHIFT 20
2544#define MEMCG_DELAY_SCALING_SHIFT 14
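/*
 * Editor's note: a worked example of the delay formula above (illustrative
 * only, not part of the original source; assumes 4KiB pages, HZ == 1000 and
 * a MEMCG_CHARGE_BATCH of 32 pages). With memory.high = 100MiB (25600 pages)
 * and usage = 110MiB (28160 pages):
 *
 *   overage         = ((28160 - 25600) << MEMCG_DELAY_PRECISION_SHIFT) / 25600
 *                   = 104857
 *   penalty_jiffies = (104857 * 104857 * HZ)
 *                         >> MEMCG_DELAY_PRECISION_SHIFT
 *                         >> MEMCG_DELAY_SCALING_SHIFT
 *                   ~= 639
 *
 * which matches the 110M row of the table above. calculate_high_delay() then
 * scales this by nr_pages / MEMCG_CHARGE_BATCH, so a task charging only
 * 8 pages would pay roughly a quarter of that delay, and the final value is
 * clamped to MEMCG_MAX_HIGH_DELAY_JIFFIES (2 seconds).
 */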
2545
Jakub Kicinski8a5dbc62020-06-01 21:49:42 -07002546static u64 calculate_overage(unsigned long usage, unsigned long high)
2547{
2548 u64 overage;
2549
2550 if (usage <= high)
2551 return 0;
2552
2553 /*
2554 * Prevent division by 0 in overage calculation by acting as if
2555 * it was a threshold of 1 page
2556 */
2557 high = max(high, 1UL);
2558
2559 overage = usage - high;
2560 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2561 return div64_u64(overage, high);
2562}
2563
2564static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2565{
2566 u64 overage, max_overage = 0;
2567
2568 do {
2569 overage = calculate_overage(page_counter_read(&memcg->memory),
Jakub Kicinskid1663a92020-06-01 21:49:49 -07002570 READ_ONCE(memcg->memory.high));
Jakub Kicinski8a5dbc62020-06-01 21:49:42 -07002571 max_overage = max(overage, max_overage);
2572 } while ((memcg = parent_mem_cgroup(memcg)) &&
2573 !mem_cgroup_is_root(memcg));
2574
2575 return max_overage;
2576}
2577
Jakub Kicinski4b82ab42020-06-01 21:49:52 -07002578static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2579{
2580 u64 overage, max_overage = 0;
2581
2582 do {
2583 overage = calculate_overage(page_counter_read(&memcg->swap),
2584 READ_ONCE(memcg->swap.high));
2585 if (overage)
2586 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2587 max_overage = max(overage, max_overage);
2588 } while ((memcg = parent_mem_cgroup(memcg)) &&
2589 !mem_cgroup_is_root(memcg));
2590
2591 return max_overage;
2592}
2593
Chris Down0e4b01d2019-09-23 15:34:55 -07002594/*
Chris Downe26733e2020-03-21 18:22:23 -07002595 * Get the number of jiffies that we should penalise a mischievous cgroup which
2596 * is exceeding its memory.high by checking both it and its ancestors.
Tejun Heob23afb92015-11-05 18:46:11 -08002597 */
Chris Downe26733e2020-03-21 18:22:23 -07002598static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
Jakub Kicinski8a5dbc62020-06-01 21:49:42 -07002599 unsigned int nr_pages,
2600 u64 max_overage)
Tejun Heob23afb92015-11-05 18:46:11 -08002601{
Chris Downe26733e2020-03-21 18:22:23 -07002602 unsigned long penalty_jiffies;
Chris Downe26733e2020-03-21 18:22:23 -07002603
2604 if (!max_overage)
2605 return 0;
Chris Down0e4b01d2019-09-23 15:34:55 -07002606
2607 /*
Chris Down0e4b01d2019-09-23 15:34:55 -07002608 * We use overage compared to memory.high to calculate the number of
2609 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2610 * fairly lenient on small overages, and increasingly harsh when the
2611 * memcg in question makes it clear that it has no intention of stopping
2612 * its crazy behaviour, so we exponentially increase the delay based on
2613 * overage amount.
2614 */
Chris Downe26733e2020-03-21 18:22:23 -07002615 penalty_jiffies = max_overage * max_overage * HZ;
2616 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2617 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
Chris Down0e4b01d2019-09-23 15:34:55 -07002618
2619 /*
2620 * Factor in the task's own contribution to the overage, such that four
2621 * N-sized allocations are throttled approximately the same as one
2622 * 4N-sized allocation.
2623 *
2624 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2625 * larger the current charge batch is than that.
2626 */
Jakub Kicinskiff144e62020-06-01 21:49:45 -07002627 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
Chris Downe26733e2020-03-21 18:22:23 -07002628}
2629
2630/*
2631 * Scheduled by try_charge() to be executed from the userland return path
2632 * and reclaims memory over the high limit.
2633 */
2634void mem_cgroup_handle_over_high(void)
2635{
2636 unsigned long penalty_jiffies;
2637 unsigned long pflags;
Chris Downb3ff9292020-08-06 23:21:54 -07002638 unsigned long nr_reclaimed;
Chris Downe26733e2020-03-21 18:22:23 -07002639 unsigned int nr_pages = current->memcg_nr_pages_over_high;
Chris Downd977aa92020-08-06 23:21:58 -07002640 int nr_retries = MAX_RECLAIM_RETRIES;
Chris Downe26733e2020-03-21 18:22:23 -07002641 struct mem_cgroup *memcg;
Chris Downb3ff9292020-08-06 23:21:54 -07002642 bool in_retry = false;
Chris Downe26733e2020-03-21 18:22:23 -07002643
2644 if (likely(!nr_pages))
2645 return;
2646
2647 memcg = get_mem_cgroup_from_mm(current->mm);
Chris Downe26733e2020-03-21 18:22:23 -07002648 current->memcg_nr_pages_over_high = 0;
2649
Chris Downb3ff9292020-08-06 23:21:54 -07002650retry_reclaim:
2651 /*
2652 * The allocating task should reclaim at least the batch size, but for
2653 * subsequent retries we only want to do what's necessary to prevent oom
2654 * or breaching resource isolation.
2655 *
2656 * This is distinct from memory.max or page allocator behaviour because
2657 * memory.high is currently batched, whereas memory.max and the page
2658 * allocator run every time an allocation is made.
2659 */
2660 nr_reclaimed = reclaim_high(memcg,
2661 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2662 GFP_KERNEL);
2663
Chris Downe26733e2020-03-21 18:22:23 -07002664 /*
2665 * memory.high is breached and reclaim is unable to keep up. Throttle
2666 * allocators proactively to slow down excessive growth.
2667 */
Jakub Kicinski8a5dbc62020-06-01 21:49:42 -07002668 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2669 mem_find_max_overage(memcg));
Chris Down0e4b01d2019-09-23 15:34:55 -07002670
Jakub Kicinski4b82ab42020-06-01 21:49:52 -07002671 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2672 swap_find_max_overage(memcg));
2673
Chris Down0e4b01d2019-09-23 15:34:55 -07002674 /*
Jakub Kicinskiff144e62020-06-01 21:49:45 -07002675 * Clamp the max delay per usermode return so as to still keep the
2676 * application moving forwards and also permit diagnostics, albeit
2677 * extremely slowly.
2678 */
2679 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2680
2681 /*
Chris Down0e4b01d2019-09-23 15:34:55 -07002682 * Don't sleep if the amount of jiffies this memcg owes us is so low
2683 * that it's not even worth doing, in an attempt to be nice to those who
2684 * go only a small amount over their memory.high value and maybe haven't
2685 * been aggressively reclaimed enough yet.
2686 */
2687 if (penalty_jiffies <= HZ / 100)
2688 goto out;
2689
2690 /*
Chris Downb3ff9292020-08-06 23:21:54 -07002691 * If reclaim is making forward progress but we're still over
2692 * memory.high, we want to encourage that rather than doing allocator
2693 * throttling.
2694 */
2695 if (nr_reclaimed || nr_retries--) {
2696 in_retry = true;
2697 goto retry_reclaim;
2698 }
2699
2700 /*
Chris Down0e4b01d2019-09-23 15:34:55 -07002701 * If we exit early, we're guaranteed to die (since
2702 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2703 * need to account for any ill-begotten jiffies to pay them off later.
2704 */
2705 psi_memstall_enter(&pflags);
2706 schedule_timeout_killable(penalty_jiffies);
2707 psi_memstall_leave(&pflags);
2708
2709out:
2710 css_put(&memcg->css);
Tejun Heob23afb92015-11-05 18:46:11 -08002711}
2712
Johannes Weiner00501b52014-08-08 14:19:20 -07002713static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2714 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002715{
Johannes Weinera983b5e2018-01-31 16:16:45 -08002716 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
Chris Downd977aa92020-08-06 23:21:58 -07002717 int nr_retries = MAX_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002718 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002719 struct page_counter *counter;
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002720 enum oom_status oom_status;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002721 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002722 bool may_swap = true;
2723 bool drained = false;
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002724 unsigned long pflags;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002725
Johannes Weinerce00a962014-09-05 08:43:57 -04002726 if (mem_cgroup_is_root(memcg))
Tejun Heo10d53c72015-11-05 18:46:17 -08002727 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002728retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07002729 if (consume_stock(memcg, nr_pages))
Tejun Heo10d53c72015-11-05 18:46:17 -08002730 return 0;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002731
Johannes Weiner7941d212016-01-14 15:21:23 -08002732 if (!do_memsw_account() ||
Johannes Weiner6071ca52015-11-05 18:50:26 -08002733 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2734 if (page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002735 goto done_restock;
Johannes Weiner7941d212016-01-14 15:21:23 -08002736 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002737 page_counter_uncharge(&memcg->memsw, batch);
2738 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002739 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002740 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002741 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002742 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002743
Johannes Weiner6539cc02014-08-06 16:05:42 -07002744 if (batch > nr_pages) {
2745 batch = nr_pages;
2746 goto retry;
2747 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002748
Johannes Weiner06b078f2014-08-06 16:05:44 -07002749 /*
Johannes Weiner869712f2019-11-05 21:17:13 -08002750 * Memcg doesn't have a dedicated reserve for atomic
2751 * allocations. But like the global atomic pool, we need to
2752 * put the burden of reclaim on regular allocation requests
2753 * and let these go through as privileged allocations.
2754 */
2755 if (gfp_mask & __GFP_ATOMIC)
2756 goto force;
2757
2758 /*
Johannes Weiner06b078f2014-08-06 16:05:44 -07002759 * Unlike in global OOM situations, memcg is not in a physical
2760 * memory shortage. Allow dying and OOM-killed tasks to
2761 * bypass the last charges so that they can exit quickly and
2762 * free their memory.
2763 */
Tetsuo Handa7775fac2019-03-05 15:46:47 -08002764 if (unlikely(should_force_charge()))
Tejun Heo10d53c72015-11-05 18:46:17 -08002765 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07002766
Johannes Weiner89a28482016-10-27 17:46:56 -07002767 /*
2768 * Prevent unbounded recursion when reclaim operations need to
2769 * allocate memory. This might exceed the limits temporarily,
2770 * but we prefer facilitating memory reclaim and getting back
2771 * under the limit over triggering OOM kills in these cases.
2772 */
2773 if (unlikely(current->flags & PF_MEMALLOC))
2774 goto force;
2775
Johannes Weiner06b078f2014-08-06 16:05:44 -07002776 if (unlikely(task_in_memcg_oom(current)))
2777 goto nomem;
2778
Mel Gormand0164ad2015-11-06 16:28:21 -08002779 if (!gfpflags_allow_blocking(gfp_mask))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002780 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002781
Johannes Weinere27be242018-04-10 16:29:45 -07002782 memcg_memory_event(mem_over_limit, MEMCG_MAX);
Johannes Weiner241994ed2015-02-11 15:26:06 -08002783
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002784 psi_memstall_enter(&pflags);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002785 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2786 gfp_mask, may_swap);
Johannes Weinere22c6ed2020-08-06 23:22:15 -07002787 psi_memstall_leave(&pflags);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002788
Johannes Weiner61e02c72014-08-06 16:08:16 -07002789 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07002790 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07002791
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002792 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002793 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002794 drained = true;
2795 goto retry;
2796 }
2797
Johannes Weiner28c34c22014-08-06 16:05:47 -07002798 if (gfp_mask & __GFP_NORETRY)
2799 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002800 /*
2801 * Even though the limit is exceeded at this point, reclaim
2802 * may have been able to free some pages. Retry the charge
2803 * before killing the task.
2804 *
2805 * Only for regular pages, though: huge pages are rather
2806 * unlikely to succeed so close to the limit, and we fall back
2807 * to regular pages anyway in case of failure.
2808 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07002809 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002810 goto retry;
2811 /*
2812 * At task move, charge accounts can be doubly counted. So, it's
2813 * better to wait until the end of task_move if something is going on.
2814 */
2815 if (mem_cgroup_wait_acct_move(mem_over_limit))
2816 goto retry;
2817
Johannes Weiner9b130612014-08-06 16:05:51 -07002818 if (nr_retries--)
2819 goto retry;
2820
Shakeel Butt38d38492019-07-11 20:55:48 -07002821 if (gfp_mask & __GFP_RETRY_MAYFAIL)
Michal Hocko29ef6802018-08-17 15:47:11 -07002822 goto nomem;
2823
Johannes Weiner06b078f2014-08-06 16:05:44 -07002824 if (gfp_mask & __GFP_NOFAIL)
Tejun Heo10d53c72015-11-05 18:46:17 -08002825 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07002826
Johannes Weiner6539cc02014-08-06 16:05:42 -07002827 if (fatal_signal_pending(current))
Tejun Heo10d53c72015-11-05 18:46:17 -08002828 goto force;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002829
Michal Hocko29ef6802018-08-17 15:47:11 -07002830 /*
2831 * keep retrying as long as the memcg oom killer is able to make
2832 * forward progress or bypass the charge if the oom killer
2833 * couldn't make any progress.
2834 */
2835 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
Jerome Marchand3608de02015-11-05 18:47:29 -08002836 get_order(nr_pages * PAGE_SIZE));
Michal Hocko29ef6802018-08-17 15:47:11 -07002837 switch (oom_status) {
2838 case OOM_SUCCESS:
Chris Downd977aa92020-08-06 23:21:58 -07002839 nr_retries = MAX_RECLAIM_RETRIES;
Michal Hocko29ef6802018-08-17 15:47:11 -07002840 goto retry;
2841 case OOM_FAILED:
2842 goto force;
2843 default:
2844 goto nomem;
2845 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002846nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002847 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002848 return -ENOMEM;
Tejun Heo10d53c72015-11-05 18:46:17 -08002849force:
2850 /*
2851 * The allocation either can't fail or will lead to more memory
2852 * being freed very soon. Allow memory usage to go over the limit
2853 * temporarily by force charging it.
2854 */
2855 page_counter_charge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002856 if (do_memsw_account())
Tejun Heo10d53c72015-11-05 18:46:17 -08002857 page_counter_charge(&memcg->memsw, nr_pages);
Tejun Heo10d53c72015-11-05 18:46:17 -08002858
2859 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002860
2861done_restock:
2862 if (batch > nr_pages)
2863 refill_stock(memcg, batch - nr_pages);
Tejun Heob23afb92015-11-05 18:46:11 -08002864
Johannes Weiner241994ed2015-02-11 15:26:06 -08002865 /*
Tejun Heob23afb92015-11-05 18:46:11 -08002866 * If the hierarchy is above the normal consumption range, schedule
2867 * reclaim on returning to userland. We can perform reclaim here
Mel Gorman71baba42015-11-06 16:28:28 -08002868 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
Tejun Heob23afb92015-11-05 18:46:11 -08002869 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2870 * not recorded as it most likely matches current's and won't
2871 * change in the meantime. As high limit is checked again before
2872 * reclaim, the cost of mismatch is negligible.
Johannes Weiner241994ed2015-02-11 15:26:06 -08002873 */
2874 do {
Jakub Kicinski4b82ab42020-06-01 21:49:52 -07002875 bool mem_high, swap_high;
2876
2877 mem_high = page_counter_read(&memcg->memory) >
2878 READ_ONCE(memcg->memory.high);
2879 swap_high = page_counter_read(&memcg->swap) >
2880 READ_ONCE(memcg->swap.high);
2881
2882 /* Don't bother a random interrupted task */
2883 if (in_interrupt()) {
2884 if (mem_high) {
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002885 schedule_work(&memcg->high_work);
2886 break;
2887 }
Jakub Kicinski4b82ab42020-06-01 21:49:52 -07002888 continue;
2889 }
2890
2891 if (mem_high || swap_high) {
2892 /*
2893 * The allocating tasks in this cgroup will need to do
2894 * reclaim or be throttled to prevent further growth
2895 * of the memory or swap footprints.
2896 *
2897 * Target some best-effort fairness between the tasks,
2898 * and distribute reclaim work and delay penalties
2899 * based on how much each task is actually allocating.
2900 */
Vladimir Davydov9516a182015-12-11 13:40:24 -08002901 current->memcg_nr_pages_over_high += batch;
Tejun Heob23afb92015-11-05 18:46:11 -08002902 set_notify_resume(current);
2903 break;
2904 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08002905 } while ((memcg = parent_mem_cgroup(memcg)));
Tejun Heo10d53c72015-11-05 18:46:17 -08002906
2907 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002908}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002909
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07002910#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
Johannes Weiner00501b52014-08-08 14:19:20 -07002911static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002912{
Johannes Weinerce00a962014-09-05 08:43:57 -04002913 if (mem_cgroup_is_root(memcg))
2914 return;
2915
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002916 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002917 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002918 page_counter_uncharge(&memcg->memsw, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002919}
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07002920#endif
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002921
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07002922static void commit_charge(struct page *page, struct mem_cgroup *memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002923{
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002924 VM_BUG_ON_PAGE(page_memcg(page), page);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002925 /*
Alex Shia5eb0112020-12-14 19:06:42 -08002926 * Any of the following ensures page's memcg stability:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002927 *
Johannes Weinera0b5b412020-06-03 16:02:27 -07002928 * - the page lock
2929 * - LRU isolation
2930 * - lock_page_memcg()
2931 * - exclusive reference
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002932 */
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002933 page->memcg_data = (unsigned long)memcg;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002934}
2935
Kirill Tkhai84c07d12018-08-17 15:47:25 -07002936#ifdef CONFIG_MEMCG_KMEM
Roman Gushchin10befea2020-08-06 23:21:27 -07002937int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
Roman Gushchin2e9bd482021-02-24 12:03:11 -08002938 gfp_t gfp, bool new_page)
Roman Gushchin10befea2020-08-06 23:21:27 -07002939{
2940 unsigned int objects = objs_per_slab_page(s, page);
Roman Gushchin2e9bd482021-02-24 12:03:11 -08002941 unsigned long memcg_data;
Roman Gushchin10befea2020-08-06 23:21:27 -07002942 void *vec;
2943
2944 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2945 page_to_nid(page));
2946 if (!vec)
2947 return -ENOMEM;
2948
Roman Gushchin2e9bd482021-02-24 12:03:11 -08002949 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2950 if (new_page) {
2951 /*
2952 * If the slab page is brand new and nobody can yet access
2953 * its memcg_data, no synchronization is required and
2954 * memcg_data can be simply assigned.
2955 */
2956 page->memcg_data = memcg_data;
2957 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
2958 /*
2959 * If the slab page is already in use, somebody can allocate
2960 * and assign obj_cgroups in parallel. In this case the existing
2961 * objcg vector should be reused.
2962 */
Roman Gushchin10befea2020-08-06 23:21:27 -07002963 kfree(vec);
Roman Gushchin2e9bd482021-02-24 12:03:11 -08002964 return 0;
2965 }
Roman Gushchin10befea2020-08-06 23:21:27 -07002966
Roman Gushchin2e9bd482021-02-24 12:03:11 -08002967 kmemleak_not_leak(vec);
Roman Gushchin10befea2020-08-06 23:21:27 -07002968 return 0;
2969}
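/*
 * Editor's note (illustrative, not in the original source): the cmpxchg()
 * above matters when two CPUs allocate objects from the same already
 * populated slab page concurrently. Both may find the objcg vector missing
 * and kcalloc() one, but only the first cmpxchg() installs its vector in
 * page->memcg_data; the loser sees a non-zero value, frees its own copy and
 * reuses the installed vector. A brand new slab page (new_page == true) is
 * not yet visible to other CPUs, so a plain assignment suffices there.
 */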
2970
Roman Gushchin8380ce42020-03-28 19:17:25 -07002971/*
2972 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2973 *
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08002974 * A passed kernel object can be a slab object or a generic kernel page, so
2975 * different mechanisms for getting the memory cgroup pointer should be used.
2976 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2977 * cannot know for sure how the kernel object is implemented.
2978 * mem_cgroup_from_obj() can be safely used in such cases.
2979 *
Roman Gushchin8380ce42020-03-28 19:17:25 -07002980 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2981 * cgroup_mutex, etc.
2982 */
2983struct mem_cgroup *mem_cgroup_from_obj(void *p)
2984{
2985 struct page *page;
2986
2987 if (mem_cgroup_disabled())
2988 return NULL;
2989
2990 page = virt_to_head_page(p);
2991
2992 /*
Roman Gushchin98556092020-08-06 23:21:10 -07002993 * Slab objects are accounted individually, not per-page.
2994 * Memcg membership data for each individual object is saved in
2995 * the page's obj_cgroups vector (see page_objcgs()).
Roman Gushchin8380ce42020-03-28 19:17:25 -07002996 */
Roman Gushchin270c6a72020-12-01 13:58:28 -08002997 if (page_objcgs_check(page)) {
Roman Gushchin98556092020-08-06 23:21:10 -07002998 struct obj_cgroup *objcg;
2999 unsigned int off;
3000
3001 off = obj_to_index(page->slab_cache, page, p);
Roman Gushchin270c6a72020-12-01 13:58:28 -08003002 objcg = page_objcgs(page)[off];
Roman Gushchin10befea2020-08-06 23:21:27 -07003003 if (objcg)
3004 return obj_cgroup_memcg(objcg);
3005
3006 return NULL;
Roman Gushchin98556092020-08-06 23:21:10 -07003007 }
Roman Gushchin8380ce42020-03-28 19:17:25 -07003008
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08003009 /*
3010 * page_memcg_check() is used here, because the page_objcgs_check()
3011 * check above could fail because the object cgroups vector wasn't set
3012 * at that moment, but it can be set concurrently.
3013 * page_memcg_check(page) will guarantee that a proper memory
3014 * cgroup pointer or NULL will be returned.
3015 */
3016 return page_memcg_check(page);
Roman Gushchin8380ce42020-03-28 19:17:25 -07003017}
3018
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003019__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3020{
3021 struct obj_cgroup *objcg = NULL;
3022 struct mem_cgroup *memcg;
3023
Roman Gushchin279c3392020-10-17 16:13:44 -07003024 if (memcg_kmem_bypass())
3025 return NULL;
3026
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003027 rcu_read_lock();
Roman Gushchin37d59852020-10-17 16:13:50 -07003028 if (unlikely(active_memcg()))
3029 memcg = active_memcg();
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003030 else
3031 memcg = mem_cgroup_from_task(current);
3032
3033 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
3034 objcg = rcu_dereference(memcg->objcg);
3035 if (objcg && obj_cgroup_tryget(objcg))
3036 break;
Muchun Song2f7659a2020-12-14 19:06:31 -08003037 objcg = NULL;
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003038 }
3039 rcu_read_unlock();
3040
3041 return objcg;
3042}
3043
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003044static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08003045{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003046 int id, size;
3047 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08003048
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08003049 id = ida_simple_get(&memcg_cache_ida,
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003050 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
3051 if (id < 0)
3052 return id;
3053
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08003054 if (id < memcg_nr_cache_ids)
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003055 return id;
3056
3057 /*
3058 * There's no space for the new id in memcg_caches arrays,
3059 * so we have to grow them.
3060 */
Vladimir Davydov05257a12015-02-12 14:59:01 -08003061 down_write(&memcg_cache_ids_sem);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003062
3063 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08003064 if (size < MEMCG_CACHES_MIN_SIZE)
3065 size = MEMCG_CACHES_MIN_SIZE;
3066 else if (size > MEMCG_CACHES_MAX_SIZE)
3067 size = MEMCG_CACHES_MAX_SIZE;
3068
Roman Gushchin98556092020-08-06 23:21:10 -07003069 err = memcg_update_all_list_lrus(size);
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08003070 if (!err)
Vladimir Davydov05257a12015-02-12 14:59:01 -08003071 memcg_nr_cache_ids = size;
3072
3073 up_write(&memcg_cache_ids_sem);
3074
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003075 if (err) {
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08003076 ida_simple_remove(&memcg_cache_ida, id);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003077 return err;
3078 }
3079 return id;
3080}
3081
3082static void memcg_free_cache_id(int id)
3083{
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08003084 ida_simple_remove(&memcg_cache_ida, id);
Glauber Costa55007d82012-12-18 14:22:38 -08003085}
3086
Vladimir Davydov45264772016-07-26 15:24:21 -07003087/**
Roman Gushchin4b13f642020-04-01 21:06:56 -07003088 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
Roman Gushchin10eaec22020-04-01 21:06:39 -07003089 * @memcg: memory cgroup to charge
Vladimir Davydov45264772016-07-26 15:24:21 -07003090 * @gfp: reclaim mode
Roman Gushchin92d0510c2020-04-01 21:06:49 -07003091 * @nr_pages: number of pages to charge
Vladimir Davydov45264772016-07-26 15:24:21 -07003092 *
3093 * Returns 0 on success, an error code on failure.
3094 */
Roman Gushchin4b13f642020-04-01 21:06:56 -07003095int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
3096 unsigned int nr_pages)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003097{
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003098 struct page_counter *counter;
Johannes Weiner6071ca52015-11-05 18:50:26 -08003099 int ret;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003100
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003101 ret = try_charge(memcg, gfp, nr_pages);
Johannes Weiner52c29b02016-01-20 15:02:35 -08003102 if (ret)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003103 return ret;
Johannes Weiner52c29b02016-01-20 15:02:35 -08003104
3105 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3106 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
Michal Hockoe55d9d92019-09-25 16:45:53 -07003107
3108 /*
3109 * Enforce __GFP_NOFAIL allocation because callers are not
3110 * prepared to see failures and likely do not have any failure
3111 * handling code.
3112 */
3113 if (gfp & __GFP_NOFAIL) {
3114 page_counter_charge(&memcg->kmem, nr_pages);
3115 return 0;
3116 }
Johannes Weiner52c29b02016-01-20 15:02:35 -08003117 cancel_charge(memcg, nr_pages);
3118 return -ENOMEM;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003119 }
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003120 return 0;
3121}
3122
Vladimir Davydov45264772016-07-26 15:24:21 -07003123/**
Roman Gushchin4b13f642020-04-01 21:06:56 -07003124 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3125 * @memcg: memcg to uncharge
3126 * @nr_pages: number of pages to uncharge
3127 */
3128void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3129{
3130 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3131 page_counter_uncharge(&memcg->kmem, nr_pages);
3132
Roman Gushchin3de7d4f2021-01-23 21:01:07 -08003133 refill_stock(memcg, nr_pages);
Roman Gushchin4b13f642020-04-01 21:06:56 -07003134}
3135
3136/**
Roman Gushchinf4b00ea2020-04-01 21:06:46 -07003137 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
Vladimir Davydov45264772016-07-26 15:24:21 -07003138 * @page: page to charge
3139 * @gfp: reclaim mode
3140 * @order: allocation order
3141 *
3142 * Returns 0 on success, an error code on failure.
3143 */
Roman Gushchinf4b00ea2020-04-01 21:06:46 -07003144int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003145{
3146 struct mem_cgroup *memcg;
Vladimir Davydovfcff7d72016-03-17 14:17:29 -07003147 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003148
Shakeel Buttd46eb14b2018-08-17 15:46:39 -07003149 memcg = get_mem_cgroup_from_current();
Roman Gushchin279c3392020-10-17 16:13:44 -07003150 if (memcg && !mem_cgroup_is_root(memcg)) {
Roman Gushchin4b13f642020-04-01 21:06:56 -07003151 ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
Roman Gushchin4d96ba32019-07-11 20:56:31 -07003152 if (!ret) {
Roman Gushchin18b2db32020-12-01 13:58:30 -08003153 page->memcg_data = (unsigned long)memcg |
3154 MEMCG_DATA_KMEM;
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07003155 return 0;
Roman Gushchin4d96ba32019-07-11 20:56:31 -07003156 }
Roman Gushchin279c3392020-10-17 16:13:44 -07003157 css_put(&memcg->css);
Vladimir Davydovc4159a72016-08-08 23:03:12 +03003158 }
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08003159 return ret;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003160}
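/*
 * Editor's note (illustrative, not in the original source): for an order-2
 * GFP_KERNEL allocation, the function above charges 1 << 2 = 4 pages to the
 * current task's memcg and tags the page by storing the memcg pointer ORed
 * with MEMCG_DATA_KMEM in page->memcg_data, which is how
 * __memcg_kmem_uncharge_page() below finds the memcg and uncharges the same
 * four pages when the allocation is freed.
 */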
Roman Gushchin49a18ea2019-07-11 20:56:13 -07003161
3162/**
Roman Gushchinf4b00ea2020-04-01 21:06:46 -07003163 * __memcg_kmem_uncharge_page: uncharge a kmem page
Vladimir Davydov45264772016-07-26 15:24:21 -07003164 * @page: page to uncharge
3165 * @order: allocation order
3166 */
Roman Gushchinf4b00ea2020-04-01 21:06:46 -07003167void __memcg_kmem_uncharge_page(struct page *page, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003168{
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08003169 struct mem_cgroup *memcg = page_memcg(page);
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08003170 unsigned int nr_pages = 1 << order;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003171
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003172 if (!memcg)
3173 return;
3174
Sasha Levin309381fea2014-01-23 15:52:54 -08003175 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Roman Gushchin4b13f642020-04-01 21:06:56 -07003176 __memcg_kmem_uncharge(memcg, nr_pages);
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08003177 page->memcg_data = 0;
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07003178 css_put(&memcg->css);
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08003179}
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003180
3181static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3182{
3183 struct memcg_stock_pcp *stock;
3184 unsigned long flags;
3185 bool ret = false;
3186
3187 local_irq_save(flags);
3188
3189 stock = this_cpu_ptr(&memcg_stock);
3190 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3191 stock->nr_bytes -= nr_bytes;
3192 ret = true;
3193 }
3194
3195 local_irq_restore(flags);
3196
3197 return ret;
3198}
3199
3200static void drain_obj_stock(struct memcg_stock_pcp *stock)
3201{
3202 struct obj_cgroup *old = stock->cached_objcg;
3203
3204 if (!old)
3205 return;
3206
3207 if (stock->nr_bytes) {
3208 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3209 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3210
3211 if (nr_pages) {
3212 rcu_read_lock();
3213 __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
3214 rcu_read_unlock();
3215 }
3216
3217 /*
3218 * The leftover is flushed to the centralized per-memcg value.
3219 * On the next attempt to refill obj stock it will be moved
3220 * to a per-cpu stock (probably on another CPU), see
3221 * refill_obj_stock().
3222 *
3223 * How often it's flushed is a trade-off between the memory
3224 * limit enforcement accuracy and potential CPU contention,
3225 * so it might be changed in the future.
3226 */
3227 atomic_add(nr_bytes, &old->nr_charged_bytes);
3228 stock->nr_bytes = 0;
3229 }
3230
3231 obj_cgroup_put(old);
3232 stock->cached_objcg = NULL;
3233}
3234
3235static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3236 struct mem_cgroup *root_memcg)
3237{
3238 struct mem_cgroup *memcg;
3239
3240 if (stock->cached_objcg) {
3241 memcg = obj_cgroup_memcg(stock->cached_objcg);
3242 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3243 return true;
3244 }
3245
3246 return false;
3247}
3248
3249static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3250{
3251 struct memcg_stock_pcp *stock;
3252 unsigned long flags;
3253
3254 local_irq_save(flags);
3255
3256 stock = this_cpu_ptr(&memcg_stock);
3257 if (stock->cached_objcg != objcg) { /* reset if necessary */
3258 drain_obj_stock(stock);
3259 obj_cgroup_get(objcg);
3260 stock->cached_objcg = objcg;
3261 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3262 }
3263 stock->nr_bytes += nr_bytes;
3264
3265 if (stock->nr_bytes > PAGE_SIZE)
3266 drain_obj_stock(stock);
3267
3268 local_irq_restore(flags);
3269}
3270
3271int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3272{
3273 struct mem_cgroup *memcg;
3274 unsigned int nr_pages, nr_bytes;
3275 int ret;
3276
3277 if (consume_obj_stock(objcg, size))
3278 return 0;
3279
3280 /*
3281 * In theory, objcg->nr_charged_bytes can have enough
3282 * pre-charged bytes to satisfy the allocation. However,
3283 * flushing objcg->nr_charged_bytes requires two atomic
3284 * operations, and objcg->nr_charged_bytes can't be big,
3285 * so it's better to ignore it and try to grab some new pages.
3286 * objcg->nr_charged_bytes will be flushed in
3287 * refill_obj_stock(), called from this function or
3288 * independently later.
3289 */
3290 rcu_read_lock();
Muchun Songeefbfa72020-12-14 19:06:35 -08003291retry:
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003292 memcg = obj_cgroup_memcg(objcg);
Muchun Songeefbfa72020-12-14 19:06:35 -08003293 if (unlikely(!css_tryget(&memcg->css)))
3294 goto retry;
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003295 rcu_read_unlock();
3296
3297 nr_pages = size >> PAGE_SHIFT;
3298 nr_bytes = size & (PAGE_SIZE - 1);
3299
3300 if (nr_bytes)
3301 nr_pages += 1;
3302
3303 ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3304 if (!ret && nr_bytes)
3305 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3306
3307 css_put(&memcg->css);
3308 return ret;
3309}
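/*
 * Editor's note: a worked example of the byte-level accounting above
 * (illustrative only, assumes 4KiB pages). Charging a 700-byte object:
 *
 *   nr_pages = 700 >> PAGE_SHIFT = 0, nr_bytes = 700  ->  nr_pages = 1
 *
 * so one full page is charged to the objcg's memcg and the remaining
 * 4096 - 700 = 3396 bytes are handed to the per-cpu stock by
 * refill_obj_stock(). A subsequent sub-page charge of up to 3396 bytes for
 * the same objcg on this CPU is then satisfied by consume_obj_stock()
 * without touching the page counters, and obj_cgroup_uncharge() below simply
 * returns bytes to that stock.
 */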
3310
3311void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3312{
3313 refill_obj_stock(objcg, size);
3314}
3315
Kirill Tkhai84c07d12018-08-17 15:47:25 -07003316#endif /* CONFIG_MEMCG_KMEM */
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003317
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003318#ifdef CONFIG_TRANSPARENT_HUGEPAGE
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003319/*
Alex Shi6168d0d2020-12-15 12:34:29 -08003320 * Because page_memcg(head) is not set on compound tails, set it now.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003321 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003322void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003323{
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08003324 struct mem_cgroup *memcg = page_memcg(head);
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003325 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003326
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08003327 if (mem_cgroup_disabled())
3328 return;
David Rientjesb070e652013-05-07 16:18:09 -07003329
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07003330 for (i = 1; i < HPAGE_PMD_NR; i++) {
3331 css_get(&memcg->css);
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08003332 head[i].memcg_data = (unsigned long)memcg;
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07003333 }
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003334}
Hugh Dickins12d27102012-01-12 17:19:52 -08003335#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003336
Andrew Mortonc255a452012-07-31 16:43:02 -07003337#ifdef CONFIG_MEMCG_SWAP
Daisuke Nishimura02491442010-03-10 15:22:17 -08003338/**
3339 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3340 * @entry: swap entry to be moved
3341 * @from: mem_cgroup which the entry is moved from
3342 * @to: mem_cgroup which the entry is moved to
3343 *
3344 * It succeeds only when the swap_cgroup's record for this entry is the same
3345 * as the mem_cgroup's id of @from.
3346 *
3347 * Returns 0 on success, -EINVAL on failure.
3348 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003349 * The caller must have charged to @to, IOW, called page_counter_charge() for
Daisuke Nishimura02491442010-03-10 15:22:17 -08003350 * both res and memsw, and called css_get().
3351 */
3352static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07003353 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08003354{
3355 unsigned short old_id, new_id;
3356
Li Zefan34c00c32013-09-23 16:56:01 +08003357 old_id = mem_cgroup_id(from);
3358 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08003359
3360 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08003361 mod_memcg_state(from, MEMCG_SWAP, -1);
3362 mod_memcg_state(to, MEMCG_SWAP, 1);
Daisuke Nishimura02491442010-03-10 15:22:17 -08003363 return 0;
3364 }
3365 return -EINVAL;
3366}
3367#else
3368static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07003369 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08003370{
3371 return -EINVAL;
3372}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003373#endif
3374
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003375static DEFINE_MUTEX(memcg_max_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07003376
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003377static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3378 unsigned long max, bool memsw)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003379{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003380 bool enlarge = false;
Shakeel Buttbb4a7ea2018-06-07 17:07:27 -07003381 bool drained = false;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003382 int ret;
Yu Zhaoc054a782018-01-31 16:20:02 -08003383 bool limits_invariant;
3384 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003385
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003386 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003387 if (signal_pending(current)) {
3388 ret = -EINTR;
3389 break;
3390 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003391
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003392 mutex_lock(&memcg_max_mutex);
Yu Zhaoc054a782018-01-31 16:20:02 -08003393 /*
3394 * Make sure that the new limit (memsw or memory limit) doesn't
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003395 * break our basic invariant rule memory.max <= memsw.max.
Yu Zhaoc054a782018-01-31 16:20:02 -08003396 */
Chris Down15b42562020-04-01 21:07:20 -07003397 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003398 max <= memcg->memsw.max;
Yu Zhaoc054a782018-01-31 16:20:02 -08003399 if (!limits_invariant) {
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003400 mutex_unlock(&memcg_max_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003401 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003402 break;
3403 }
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003404 if (max > counter->max)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003405 enlarge = true;
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003406 ret = page_counter_set_max(counter, max);
3407 mutex_unlock(&memcg_max_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003408
3409 if (!ret)
3410 break;
3411
Shakeel Buttbb4a7ea2018-06-07 17:07:27 -07003412 if (!drained) {
3413 drain_all_stock(memcg);
3414 drained = true;
3415 continue;
3416 }
3417
Andrey Ryabinin1ab5c052018-01-31 16:20:37 -08003418 if (!try_to_free_mem_cgroup_pages(memcg, 1,
3419 GFP_KERNEL, !memsw)) {
3420 ret = -EBUSY;
3421 break;
3422 }
3423 } while (true);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003424
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003425 if (!ret && enlarge)
3426 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003427
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003428 return ret;
3429}
3430
Mel Gormanef8f2322016-07-28 15:46:05 -07003431unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
Andrew Morton0608f432013-09-24 15:27:41 -07003432 gfp_t gfp_mask,
3433 unsigned long *total_scanned)
3434{
3435 unsigned long nr_reclaimed = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07003436 struct mem_cgroup_per_node *mz, *next_mz = NULL;
Andrew Morton0608f432013-09-24 15:27:41 -07003437 unsigned long reclaimed;
3438 int loop = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07003439 struct mem_cgroup_tree_per_node *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003440 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07003441 unsigned long nr_scanned;
3442
3443 if (order > 0)
3444 return 0;
3445
Mel Gormanef8f2322016-07-28 15:46:05 -07003446 mctz = soft_limit_tree_node(pgdat->node_id);
Michal Hockod6507ff2016-08-02 14:02:37 -07003447
3448 /*
3449 * Do not even bother to check the largest node if the root
3450 * is empty. Do it lockless to prevent lock bouncing. Races
3451 * are acceptable as soft limit is best effort anyway.
3452 */
Laurent Dufourbfc72282017-03-09 16:17:06 -08003453 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
Michal Hockod6507ff2016-08-02 14:02:37 -07003454 return 0;
3455
Andrew Morton0608f432013-09-24 15:27:41 -07003456 /*
3457 * This loop can run a while, especially if mem_cgroups continuously
3458 * keep exceeding their soft limit and putting the system under
3459 * pressure
3460 */
3461 do {
3462 if (next_mz)
3463 mz = next_mz;
3464 else
3465 mz = mem_cgroup_largest_soft_limit_node(mctz);
3466 if (!mz)
3467 break;
3468
3469 nr_scanned = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07003470 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07003471 gfp_mask, &nr_scanned);
3472 nr_reclaimed += reclaimed;
3473 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07003474 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08003475 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07003476
3477 /*
3478 * If we failed to reclaim anything from this memory cgroup
3479 * it is time to move on to the next cgroup
3480 */
3481 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08003482 if (!reclaimed)
3483 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3484
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003485 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07003486 /*
3487 * One school of thought says that we should not add
3488 * back the node to the tree if reclaim returns 0.
3489 * But our reclaim could return 0 simply because, due
3490 * to priority, we are exposing a smaller subset of
3491 * memory to reclaim from. Consider this as a longer
3492 * term TODO.
3493 */
3494 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07003495 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07003496 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07003497 css_put(&mz->memcg->css);
3498 loop++;
3499 /*
3500 * Could not reclaim anything and there are no more
3501 * mem cgroups to try or we seem to be looping without
3502 * reclaiming anything.
3503 */
3504 if (!nr_reclaimed &&
3505 (next_mz == NULL ||
3506 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3507 break;
3508 } while (!nr_reclaimed);
3509 if (next_mz)
3510 css_put(&next_mz->memcg->css);
3511 return nr_reclaimed;
3512}
3513
Tejun Heoea280e72014-05-16 13:22:48 -04003514/*
Greg Thelen51038172016-05-20 16:58:18 -07003515 * Reclaims as many pages from the given memcg as possible.
Michal Hockoc26251f2012-10-26 13:37:28 +02003516 *
3517 * Caller is responsible for holding css reference for memcg.
3518 */
3519static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3520{
Chris Downd977aa92020-08-06 23:21:58 -07003521 int nr_retries = MAX_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02003522
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003523 /* we call try-to-free pages to make this cgroup empty */
3524 lru_add_drain_all();
Junaid Shahidd12c60f2018-06-07 17:07:31 -07003525
3526 drain_all_stock(memcg);
3527
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003528 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003529 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003530 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003531
Michal Hockoc26251f2012-10-26 13:37:28 +02003532 if (signal_pending(current))
3533 return -EINTR;
3534
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003535 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3536 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003537 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003538 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003539 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02003540 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003541 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003542
3543 }
Michal Hockoab5196c2012-10-26 13:37:32 +02003544
3545 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003546}
3547
Tejun Heo6770c642014-05-13 12:16:21 -04003548static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3549 char *buf, size_t nbytes,
3550 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003551{
Tejun Heo6770c642014-05-13 12:16:21 -04003552 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02003553
Michal Hockod8423012012-10-26 13:37:29 +02003554 if (mem_cgroup_is_root(memcg))
3555 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04003556 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003557}
3558
Tejun Heo182446d2013-08-08 20:11:24 -04003559static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3560 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003561{
Roman Gushchinbef86202020-12-14 19:06:49 -08003562 return 1;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003563}
3564
Tejun Heo182446d2013-08-08 20:11:24 -04003565static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3566 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003567{
Roman Gushchinbef86202020-12-14 19:06:49 -08003568 if (val == 1)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08003569 return 0;
Glauber Costa567fb432012-07-31 16:43:07 -07003570
Roman Gushchinbef86202020-12-14 19:06:49 -08003571 pr_warn_once("Non-hierarchical mode is deprecated. "
3572 "Please report your usecase to linux-mm@kvack.org if you "
3573 "depend on this functionality.\n");
Glauber Costa567fb432012-07-31 16:43:07 -07003574
Roman Gushchinbef86202020-12-14 19:06:49 -08003575 return -EINVAL;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003576}
3577
Andrew Morton6f646152015-11-06 16:28:58 -08003578static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
Johannes Weinerce00a962014-09-05 08:43:57 -04003579{
Johannes Weiner42a30032019-05-14 15:47:12 -07003580 unsigned long val;
Johannes Weinerce00a962014-09-05 08:43:57 -04003581
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003582 if (mem_cgroup_is_root(memcg)) {
Johannes Weiner0d1c2072020-06-03 16:01:54 -07003583 val = memcg_page_state(memcg, NR_FILE_PAGES) +
Johannes Weinerbe5d0a72020-06-03 16:01:57 -07003584 memcg_page_state(memcg, NR_ANON_MAPPED);
Johannes Weiner42a30032019-05-14 15:47:12 -07003585 if (swap)
3586 val += memcg_page_state(memcg, MEMCG_SWAP);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003587 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04003588 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003589 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04003590 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003591 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04003592 }
Michal Hockoc12176d2015-11-05 18:50:29 -08003593 return val;
Johannes Weinerce00a962014-09-05 08:43:57 -04003594}
3595
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003596enum {
3597 RES_USAGE,
3598 RES_LIMIT,
3599 RES_MAX_USAGE,
3600 RES_FAILCNT,
3601 RES_SOFT_LIMIT,
3602};
Johannes Weinerce00a962014-09-05 08:43:57 -04003603
Tejun Heo791badb2013-12-05 12:28:02 -05003604static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07003605 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003606{
Tejun Heo182446d2013-08-08 20:11:24 -04003607 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003608 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07003609
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003610 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003611 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003612 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003613 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003614 case _MEMSWAP:
3615 counter = &memcg->memsw;
3616 break;
3617 case _KMEM:
3618 counter = &memcg->kmem;
3619 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003620 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08003621 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003622 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003623 default:
3624 BUG();
3625 }
3626
3627 switch (MEMFILE_ATTR(cft->private)) {
3628 case RES_USAGE:
3629 if (counter == &memcg->memory)
Michal Hockoc12176d2015-11-05 18:50:29 -08003630 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003631 if (counter == &memcg->memsw)
Michal Hockoc12176d2015-11-05 18:50:29 -08003632 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003633 return (u64)page_counter_read(counter) * PAGE_SIZE;
3634 case RES_LIMIT:
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003635 return (u64)counter->max * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003636 case RES_MAX_USAGE:
3637 return (u64)counter->watermark * PAGE_SIZE;
3638 case RES_FAILCNT:
3639 return counter->failcnt;
3640 case RES_SOFT_LIMIT:
3641 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003642 default:
3643 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003644 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003645}
Glauber Costa510fc4e2012-12-18 14:21:47 -08003646
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003647static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
Roman Gushchinc350a992019-08-24 17:54:47 -07003648{
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003649 unsigned long stat[MEMCG_NR_STAT] = {0};
Roman Gushchinc350a992019-08-24 17:54:47 -07003650 struct mem_cgroup *mi;
3651 int node, cpu, i;
Roman Gushchinc350a992019-08-24 17:54:47 -07003652
3653 for_each_online_cpu(cpu)
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003654 for (i = 0; i < MEMCG_NR_STAT; i++)
Shakeel Butt6c1c2802019-08-30 16:04:53 -07003655 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
Roman Gushchinc350a992019-08-24 17:54:47 -07003656
3657 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003658 for (i = 0; i < MEMCG_NR_STAT; i++)
Roman Gushchinc350a992019-08-24 17:54:47 -07003659 atomic_long_add(stat[i], &mi->vmstats[i]);
3660
3661 for_each_node(node) {
3662 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3663 struct mem_cgroup_per_node *pi;
3664
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003665 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
Roman Gushchinc350a992019-08-24 17:54:47 -07003666 stat[i] = 0;
3667
3668 for_each_online_cpu(cpu)
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003669 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
Shakeel Butt6c1c2802019-08-30 16:04:53 -07003670 stat[i] += per_cpu(
3671 pn->lruvec_stat_cpu->count[i], cpu);
Roman Gushchinc350a992019-08-24 17:54:47 -07003672
3673 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08003674 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
Roman Gushchinc350a992019-08-24 17:54:47 -07003675 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3676 }
3677}
3678
Roman Gushchinbb65f892019-08-24 17:54:50 -07003679static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3680{
3681 unsigned long events[NR_VM_EVENT_ITEMS];
3682 struct mem_cgroup *mi;
3683 int cpu, i;
3684
3685 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3686 events[i] = 0;
3687
3688 for_each_online_cpu(cpu)
3689 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
Shakeel Butt6c1c2802019-08-30 16:04:53 -07003690 events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3691 cpu);
Roman Gushchinbb65f892019-08-24 17:54:50 -07003692
3693 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3694 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3695 atomic_long_add(events[i], &mi->vmevents[i]);
3696}
3697
Kirill Tkhai84c07d12018-08-17 15:47:25 -07003698#ifdef CONFIG_MEMCG_KMEM
Johannes Weiner567e9ab2016-01-20 15:02:24 -08003699static int memcg_online_kmem(struct mem_cgroup *memcg)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003700{
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003701 struct obj_cgroup *objcg;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003702 int memcg_id;
3703
Vladimir Davydovb313aee2016-03-17 14:18:27 -07003704 if (cgroup_memory_nokmem)
3705 return 0;
3706
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003707 BUG_ON(memcg->kmemcg_id >= 0);
Johannes Weiner567e9ab2016-01-20 15:02:24 -08003708 BUG_ON(memcg->kmem_state);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003709
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003710 memcg_id = memcg_alloc_cache_id();
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08003711 if (memcg_id < 0)
3712 return memcg_id;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003713
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003714 objcg = obj_cgroup_alloc();
3715 if (!objcg) {
3716 memcg_free_cache_id(memcg_id);
3717 return -ENOMEM;
3718 }
3719 objcg->memcg = memcg;
3720 rcu_assign_pointer(memcg->objcg, objcg);
3721
Roman Gushchind648bcc2020-08-06 23:20:28 -07003722 static_branch_enable(&memcg_kmem_enabled_key);
3723
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003724 memcg->kmemcg_id = memcg_id;
Johannes Weiner567e9ab2016-01-20 15:02:24 -08003725 memcg->kmem_state = KMEM_ONLINE;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08003726
3727 return 0;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003728}
3729
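/*
 * Tear down kernel memory accounting when @memcg is taken offline:
 * reparent its obj_cgroups, point its own and all descendants'
 * kmemcg_id at the parent's id, drain the corresponding list_lrus into
 * the parent and free the old id.
 */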
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003730static void memcg_offline_kmem(struct mem_cgroup *memcg)
3731{
3732 struct cgroup_subsys_state *css;
3733 struct mem_cgroup *parent, *child;
3734 int kmemcg_id;
3735
3736 if (memcg->kmem_state != KMEM_ONLINE)
3737 return;
Roman Gushchin98556092020-08-06 23:21:10 -07003738
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003739 memcg->kmem_state = KMEM_ALLOCATED;
3740
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003741 parent = parent_mem_cgroup(memcg);
3742 if (!parent)
3743 parent = root_mem_cgroup;
3744
Roman Gushchinbf4f0592020-08-06 23:20:49 -07003745 memcg_reparent_objcgs(memcg, parent);
Roman Gushchinfb2f2b02019-07-11 20:56:34 -07003746
3747 kmemcg_id = memcg->kmemcg_id;
3748 BUG_ON(kmemcg_id < 0);
3749
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003750 /*
3751 * Change kmemcg_id of this cgroup and all its descendants to the
3752	 * parent's id, and then move all entries from this cgroup's list_lrus
3753	 * to the parent's. After we have finished, all list_lrus
3754 * corresponding to this cgroup are guaranteed to remain empty. The
3755 * ordering is imposed by list_lru_node->lock taken by
3756 * memcg_drain_all_list_lrus().
3757 */
Tejun Heo3a06bb72016-06-03 14:55:44 -07003758 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003759 css_for_each_descendant_pre(css, &memcg->css) {
3760 child = mem_cgroup_from_css(css);
3761 BUG_ON(child->kmemcg_id != kmemcg_id);
3762 child->kmemcg_id = parent->kmemcg_id;
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003763 }
Tejun Heo3a06bb72016-06-03 14:55:44 -07003764 rcu_read_unlock();
3765
Kirill Tkhai9bec5c32018-08-17 15:47:58 -07003766 memcg_drain_all_list_lrus(kmemcg_id, parent);
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003767
3768 memcg_free_cache_id(kmemcg_id);
3769}
3770
3771static void memcg_free_kmem(struct mem_cgroup *memcg)
3772{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08003773 /* css_alloc() failed, offlining didn't happen */
3774 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3775 memcg_offline_kmem(memcg);
Johannes Weiner8e0a8912016-01-20 15:02:26 -08003776}
Vladimir Davydovd6441632014-01-23 15:53:09 -08003777#else
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08003778static int memcg_online_kmem(struct mem_cgroup *memcg)
Johannes Weiner127424c2016-01-20 15:02:32 -08003779{
3780 return 0;
3781}
3782static void memcg_offline_kmem(struct mem_cgroup *memcg)
3783{
3784}
3785static void memcg_free_kmem(struct mem_cgroup *memcg)
3786{
3787}
Kirill Tkhai84c07d12018-08-17 15:47:25 -07003788#endif /* CONFIG_MEMCG_KMEM */
Johannes Weiner127424c2016-01-20 15:02:32 -08003789
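/* Set the kmem page_counter limit, serialized by memcg_max_mutex. */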
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003790static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3791 unsigned long max)
Johannes Weiner127424c2016-01-20 15:02:32 -08003792{
Vladimir Davydovb313aee2016-03-17 14:18:27 -07003793 int ret;
Johannes Weiner127424c2016-01-20 15:02:32 -08003794
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003795 mutex_lock(&memcg_max_mutex);
3796 ret = page_counter_set_max(&memcg->kmem, max);
3797 mutex_unlock(&memcg_max_mutex);
Johannes Weiner127424c2016-01-20 15:02:32 -08003798 return ret;
3799}
Glauber Costa510fc4e2012-12-18 14:21:47 -08003800
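/*
 * Set the tcpmem page_counter limit and, on first use, enable the
 * socket accounting static key so that sockets created from now on are
 * charged to this cgroup.
 */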
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003801static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003802{
3803 int ret;
3804
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003805 mutex_lock(&memcg_max_mutex);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003806
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003807 ret = page_counter_set_max(&memcg->tcpmem, max);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003808 if (ret)
3809 goto out;
3810
Johannes Weiner0db15292016-01-20 15:02:50 -08003811 if (!memcg->tcpmem_active) {
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003812 /*
3813 * The active flag needs to be written after the static_key
3814 * update. This is what guarantees that the socket activation
Johannes Weiner2d758072016-10-07 17:00:58 -07003815 * function is the last one to run. See mem_cgroup_sk_alloc()
3816 * for details, and note that we don't mark any socket as
3817 * belonging to this memcg until that flag is up.
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003818 *
3819 * We need to do this, because static_keys will span multiple
3820 * sites, but we can't control their order. If we mark a socket
3821 * as accounted, but the accounting functions are not patched in
3822 * yet, we'll lose accounting.
3823 *
Johannes Weiner2d758072016-10-07 17:00:58 -07003824 * We never race with the readers in mem_cgroup_sk_alloc(),
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003825	 * because when this value changes, the code to process it is not
3826 * patched in yet.
3827 */
3828 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weiner0db15292016-01-20 15:02:50 -08003829 memcg->tcpmem_active = true;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003830 }
3831out:
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003832 mutex_unlock(&memcg_max_mutex);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003833 return ret;
3834}
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003835
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003836/*
3837	 * Write handler for the v1 limit files: RES_LIMIT for the memory,
3838	 * memsw, kmem and tcp counters, and RES_SOFT_LIMIT for the soft limit.
3839 */
Tejun Heo451af502014-05-13 12:16:21 -04003840static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3841 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003842{
Tejun Heo451af502014-05-13 12:16:21 -04003843 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003844 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003845 int ret;
3846
Tejun Heo451af502014-05-13 12:16:21 -04003847 buf = strstrip(buf);
Johannes Weiner650c5e52015-02-11 15:26:03 -08003848 ret = page_counter_memparse(buf, "-1", &nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003849 if (ret)
3850 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07003851
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003852 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003853 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003854 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3855 ret = -EINVAL;
3856 break;
3857 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003858 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3859 case _MEM:
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003860 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003861 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003862 case _MEMSWAP:
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003863 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003864 break;
3865 case _KMEM:
Michal Hocko01581152019-09-23 15:37:22 -07003866 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3867 "Please report your usecase to linux-mm@kvack.org if you "
3868 "depend on this functionality.\n");
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003869 ret = memcg_update_kmem_max(memcg, nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003870 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003871 case _TCP:
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003872 ret = memcg_update_tcp_max(memcg, nr_pages);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003873 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003874 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003875 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003876 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003877 memcg->soft_limit = nr_pages;
3878 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003879 break;
3880 }
Tejun Heo451af502014-05-13 12:16:21 -04003881 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003882}
3883
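/*
 * Write handler for the v1 reset files: clear the watermark
 * (RES_MAX_USAGE) or the failure counter (RES_FAILCNT) of the selected
 * page_counter (memory, memsw, kmem or tcpmem).
 */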
Tejun Heo6770c642014-05-13 12:16:21 -04003884static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3885 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003886{
Tejun Heo6770c642014-05-13 12:16:21 -04003887 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003888 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003889
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003890 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3891 case _MEM:
3892 counter = &memcg->memory;
3893 break;
3894 case _MEMSWAP:
3895 counter = &memcg->memsw;
3896 break;
3897 case _KMEM:
3898 counter = &memcg->kmem;
3899 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003900 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08003901 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003902 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003903 default:
3904 BUG();
3905 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003906
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003907 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003908 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003909 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003910 break;
3911 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003912 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003913 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003914 default:
3915 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003916 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003917
Tejun Heo6770c642014-05-13 12:16:21 -04003918 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003919}
3920
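/*
 * Read/write handlers for the v1 move_charge_at_immigrate knob.  The
 * value is only consulted at the start of a task migration, so changes
 * affect migrations that begin after the write.
 */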
Tejun Heo182446d2013-08-08 20:11:24 -04003921static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003922 struct cftype *cft)
3923{
Tejun Heo182446d2013-08-08 20:11:24 -04003924 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003925}
3926
Daisuke Nishimura02491442010-03-10 15:22:17 -08003927#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003928static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003929 struct cftype *cft, u64 val)
3930{
Tejun Heo182446d2013-08-08 20:11:24 -04003931 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003932
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08003933 if (val & ~MOVE_MASK)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003934 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003935
Glauber Costaee5e8472013-02-22 16:34:50 -08003936 /*
3937 * No kind of locking is needed in here, because ->can_attach() will
3938 * check this value once in the beginning of the process, and then carry
3939 * on with stale data. This means that changes to this value will only
3940 * affect task migrations starting after the change.
3941 */
3942 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003943 return 0;
3944}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003945#else
Tejun Heo182446d2013-08-08 20:11:24 -04003946static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003947 struct cftype *cft, u64 val)
3948{
3949 return -ENOSYS;
3950}
3951#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003952
Ying Han406eb0c2011-05-26 16:25:37 -07003953#ifdef CONFIG_NUMA
Johannes Weiner113b7df2019-05-13 17:18:11 -07003954
3955#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3956#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3957#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3958
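/*
 * Sum the LRU lists selected by @lru_mask for @memcg on node @nid;
 * @tree selects the hierarchical (subtree) counts, otherwise only this
 * cgroup's own pages are counted.
 */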
3959static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
Shakeel Buttdd8657b2020-06-03 15:56:24 -07003960 int nid, unsigned int lru_mask, bool tree)
Johannes Weiner113b7df2019-05-13 17:18:11 -07003961{
Johannes Weiner867e5e12019-11-30 17:55:34 -08003962 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
Johannes Weiner113b7df2019-05-13 17:18:11 -07003963 unsigned long nr = 0;
3964 enum lru_list lru;
3965
3966 VM_BUG_ON((unsigned)nid >= nr_node_ids);
3967
3968 for_each_lru(lru) {
3969 if (!(BIT(lru) & lru_mask))
3970 continue;
Shakeel Buttdd8657b2020-06-03 15:56:24 -07003971 if (tree)
3972 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3973 else
3974 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
Johannes Weiner113b7df2019-05-13 17:18:11 -07003975 }
3976 return nr;
3977}
3978
3979static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
Shakeel Buttdd8657b2020-06-03 15:56:24 -07003980 unsigned int lru_mask,
3981 bool tree)
Johannes Weiner113b7df2019-05-13 17:18:11 -07003982{
3983 unsigned long nr = 0;
3984 enum lru_list lru;
3985
3986 for_each_lru(lru) {
3987 if (!(BIT(lru) & lru_mask))
3988 continue;
Shakeel Buttdd8657b2020-06-03 15:56:24 -07003989 if (tree)
3990 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3991 else
3992 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
Johannes Weiner113b7df2019-05-13 17:18:11 -07003993 }
3994 return nr;
3995}
3996
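/*
 * numa_stat: per-node LRU breakdown, first local to this cgroup, then
 * hierarchical ("hierarchical_" prefixed) for the whole subtree.
 */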
Tejun Heo2da8ca82013-12-05 12:28:04 -05003997static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003998{
Greg Thelen25485de2013-11-12 15:07:40 -08003999 struct numa_stat {
4000 const char *name;
4001 unsigned int lru_mask;
4002 };
4003
4004 static const struct numa_stat stats[] = {
4005 { "total", LRU_ALL },
4006 { "file", LRU_ALL_FILE },
4007 { "anon", LRU_ALL_ANON },
4008 { "unevictable", BIT(LRU_UNEVICTABLE) },
4009 };
4010 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07004011 int nid;
Chris Downaa9694b2019-03-05 15:45:52 -08004012 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Ying Han406eb0c2011-05-26 16:25:37 -07004013
Greg Thelen25485de2013-11-12 15:07:40 -08004014 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
Shakeel Buttdd8657b2020-06-03 15:56:24 -07004015 seq_printf(m, "%s=%lu", stat->name,
4016 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4017 false));
4018 for_each_node_state(nid, N_MEMORY)
4019 seq_printf(m, " N%d=%lu", nid,
4020 mem_cgroup_node_nr_lru_pages(memcg, nid,
4021 stat->lru_mask, false));
Greg Thelen25485de2013-11-12 15:07:40 -08004022 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07004023 }
Ying Han406eb0c2011-05-26 16:25:37 -07004024
Ying Han071aee12013-11-12 15:07:41 -08004025 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
Ying Han406eb0c2011-05-26 16:25:37 -07004026
Shakeel Buttdd8657b2020-06-03 15:56:24 -07004027 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4028 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4029 true));
4030 for_each_node_state(nid, N_MEMORY)
4031 seq_printf(m, " N%d=%lu", nid,
4032 mem_cgroup_node_nr_lru_pages(memcg, nid,
4033 stat->lru_mask, true));
Ying Han071aee12013-11-12 15:07:41 -08004034 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07004035 }
Ying Han406eb0c2011-05-26 16:25:37 -07004036
Ying Han406eb0c2011-05-26 16:25:37 -07004037 return 0;
4038}
4039#endif /* CONFIG_NUMA */
4040
Johannes Weinerc8713d02019-07-11 20:55:59 -07004041static const unsigned int memcg1_stats[] = {
Johannes Weiner0d1c2072020-06-03 16:01:54 -07004042 NR_FILE_PAGES,
Johannes Weinerbe5d0a72020-06-03 16:01:57 -07004043 NR_ANON_MAPPED,
Johannes Weiner468c3982020-06-03 16:02:01 -07004044#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4045 NR_ANON_THPS,
4046#endif
Johannes Weinerc8713d02019-07-11 20:55:59 -07004047 NR_SHMEM,
4048 NR_FILE_MAPPED,
4049 NR_FILE_DIRTY,
4050 NR_WRITEBACK,
4051 MEMCG_SWAP,
4052};
4053
4054static const char *const memcg1_stat_names[] = {
4055 "cache",
4056 "rss",
Johannes Weiner468c3982020-06-03 16:02:01 -07004057#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Johannes Weinerc8713d02019-07-11 20:55:59 -07004058 "rss_huge",
Johannes Weiner468c3982020-06-03 16:02:01 -07004059#endif
Johannes Weinerc8713d02019-07-11 20:55:59 -07004060 "shmem",
4061 "mapped_file",
4062 "dirty",
4063 "writeback",
4064 "swap",
4065};
4066
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07004067/* Universal VM events cgroup1 shows, original sort order */
Greg Thelen8dd53fd2018-06-07 17:07:23 -07004068static const unsigned int memcg1_events[] = {
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07004069 PGPGIN,
4070 PGPGOUT,
4071 PGFAULT,
4072 PGMAJFAULT,
4073};
4074
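/*
 * The v1 stat file: local stats, events and LRU sizes, their
 * hierarchical "total_" counterparts, and reclaim cost debugging under
 * CONFIG_DEBUG_VM.
 */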
Tejun Heo2da8ca82013-12-05 12:28:04 -05004075static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004076{
Chris Downaa9694b2019-03-05 15:45:52 -08004077 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004078 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07004079 struct mem_cgroup *mi;
4080 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004081
Johannes Weiner71cd3112017-05-03 14:55:13 -07004082 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08004083
Johannes Weiner71cd3112017-05-03 14:55:13 -07004084 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
Johannes Weiner468c3982020-06-03 16:02:01 -07004085 unsigned long nr;
4086
Johannes Weiner71cd3112017-05-03 14:55:13 -07004087 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004088 continue;
Johannes Weiner468c3982020-06-03 16:02:01 -07004089 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4090#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4091 if (memcg1_stats[i] == NR_ANON_THPS)
4092 nr *= HPAGE_PMD_NR;
4093#endif
4094 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004095 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004096
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07004097 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08004098 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
Johannes Weiner205b20c2019-05-14 15:47:06 -07004099 memcg_events_local(memcg, memcg1_events[i]));
Johannes Weineraf7c4b02012-05-29 15:07:08 -07004100
4101 for (i = 0; i < NR_LRU_LISTS; i++)
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08004102 seq_printf(m, "%s %lu\n", lru_list_name(i),
Johannes Weiner205b20c2019-05-14 15:47:06 -07004103 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
Johannes Weiner21d89d12019-05-13 17:18:08 -07004104 PAGE_SIZE);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07004105
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004106 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004107 memory = memsw = PAGE_COUNTER_MAX;
4108 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
Chris Down15b42562020-04-01 21:07:20 -07004109 memory = min(memory, READ_ONCE(mi->memory.max));
4110 memsw = min(memsw, READ_ONCE(mi->memsw.max));
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004111 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004112 seq_printf(m, "hierarchical_memory_limit %llu\n",
4113 (u64)memory * PAGE_SIZE);
Johannes Weiner7941d212016-01-14 15:21:23 -08004114 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004115 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4116 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004117
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07004118 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
zhongjiang-ali7de2e9f2020-11-01 17:07:30 -08004119 unsigned long nr;
4120
Johannes Weiner71cd3112017-05-03 14:55:13 -07004121 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004122 continue;
zhongjiang-ali7de2e9f2020-11-01 17:07:30 -08004123 nr = memcg_page_state(memcg, memcg1_stats[i]);
4124#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4125 if (memcg1_stats[i] == NR_ANON_THPS)
4126 nr *= HPAGE_PMD_NR;
4127#endif
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07004128 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
zhongjiang-ali7de2e9f2020-11-01 17:07:30 -08004129 (u64)nr * PAGE_SIZE);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07004130 }
4131
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07004132 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08004133 seq_printf(m, "total_%s %llu\n",
4134 vm_event_name(memcg1_events[i]),
Yafang Shaodd923992019-07-11 20:52:11 -07004135 (u64)memcg_events(memcg, memcg1_events[i]));
Johannes Weineraf7c4b02012-05-29 15:07:08 -07004136
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07004137 for (i = 0; i < NR_LRU_LISTS; i++)
Konstantin Khlebnikovebc5d83d2019-12-04 16:49:53 -08004138 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
Johannes Weiner42a30032019-05-14 15:47:12 -07004139 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4140 PAGE_SIZE);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004141
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004142#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004143 {
Mel Gormanef8f2322016-07-28 15:46:05 -07004144 pg_data_t *pgdat;
4145 struct mem_cgroup_per_node *mz;
Johannes Weiner1431d4d2020-06-03 16:02:53 -07004146 unsigned long anon_cost = 0;
4147 unsigned long file_cost = 0;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004148
Mel Gormanef8f2322016-07-28 15:46:05 -07004149 for_each_online_pgdat(pgdat) {
4150 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004151
Johannes Weiner1431d4d2020-06-03 16:02:53 -07004152 anon_cost += mz->lruvec.anon_cost;
4153 file_cost += mz->lruvec.file_cost;
Mel Gormanef8f2322016-07-28 15:46:05 -07004154 }
Johannes Weiner1431d4d2020-06-03 16:02:53 -07004155 seq_printf(m, "anon_cost %lu\n", anon_cost);
4156 seq_printf(m, "file_cost %lu\n", file_cost);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004157 }
4158#endif
4159
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004160 return 0;
4161}
4162
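/*
 * Read/write handlers for the v1 swappiness knob: per-memcg swappiness
 * for non-root groups, the global vm_swappiness for the root cgroup.
 */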
Tejun Heo182446d2013-08-08 20:11:24 -04004163static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4164 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004165{
Tejun Heo182446d2013-08-08 20:11:24 -04004166 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004167
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07004168 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004169}
4170
Tejun Heo182446d2013-08-08 20:11:24 -04004171static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4172 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004173{
Tejun Heo182446d2013-08-08 20:11:24 -04004174 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08004175
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07004176 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004177 return -EINVAL;
4178
Linus Torvalds14208b02014-06-09 15:03:33 -07004179 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07004180 memcg->swappiness = val;
4181 else
4182 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08004183
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004184 return 0;
4185}
4186
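/*
 * Signal every eventfd whose threshold was crossed since the last
 * check, scanning the sorted threshold array in both directions from
 * the cached current_threshold index, then update that index.
 */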
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004187static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4188{
4189 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004190 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004191 int i;
4192
4193 rcu_read_lock();
4194 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004195 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004196 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004197 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004198
4199 if (!t)
4200 goto unlock;
4201
Johannes Weinerce00a962014-09-05 08:43:57 -04004202 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004203
4204 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07004205 * current_threshold points to threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004206	 * If that is not true, a threshold was crossed after the last
4207	 * call of __mem_cgroup_threshold().
4208 */
Phil Carmody5407a562010-05-26 14:42:42 -07004209 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004210
4211 /*
4212 * Iterate backward over array of thresholds starting from
4213 * current_threshold and check if a threshold is crossed.
4214 * If none of thresholds below usage is crossed, we read
4215 * only one element of the array here.
4216 */
4217 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4218 eventfd_signal(t->entries[i].eventfd, 1);
4219
4220 /* i = current_threshold + 1 */
4221 i++;
4222
4223 /*
4224 * Iterate forward over array of thresholds starting from
4225 * current_threshold+1 and check if a threshold is crossed.
4226 * If none of thresholds above usage is crossed, we read
4227 * only one element of the array here.
4228 */
4229 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4230 eventfd_signal(t->entries[i].eventfd, 1);
4231
4232 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07004233 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004234unlock:
4235 rcu_read_unlock();
4236}
4237
4238static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4239{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07004240 while (memcg) {
4241 __mem_cgroup_threshold(memcg, false);
Johannes Weiner7941d212016-01-14 15:21:23 -08004242 if (do_memsw_account())
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07004243 __mem_cgroup_threshold(memcg, true);
4244
4245 memcg = parent_mem_cgroup(memcg);
4246 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004247}
4248
4249static int compare_thresholds(const void *a, const void *b)
4250{
4251 const struct mem_cgroup_threshold *_a = a;
4252 const struct mem_cgroup_threshold *_b = b;
4253
Greg Thelen2bff24a2013-09-11 14:23:08 -07004254 if (_a->threshold > _b->threshold)
4255 return 1;
4256
4257 if (_a->threshold < _b->threshold)
4258 return -1;
4259
4260 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004261}
4262
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004263static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004264{
4265 struct mem_cgroup_eventfd_list *ev;
4266
Michal Hocko2bcf2e92014-07-30 16:08:33 -07004267 spin_lock(&memcg_oom_lock);
4268
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004269 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004270 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07004271
4272 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004273 return 0;
4274}
4275
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004276static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004277{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004278 struct mem_cgroup *iter;
4279
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004280 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004281 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004282}
4283
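/*
 * Register a usage threshold eventfd: parse the threshold, grow the
 * RCU-protected threshold array by one entry, re-sort it and recompute
 * current_threshold for the present usage.  The old array becomes the
 * spare buffer.
 */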
Tejun Heo59b6f872013-11-22 18:20:43 -05004284static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004285 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004286{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004287 struct mem_cgroup_thresholds *thresholds;
4288 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004289 unsigned long threshold;
4290 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004291 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004292
Johannes Weiner650c5e52015-02-11 15:26:03 -08004293 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004294 if (ret)
4295 return ret;
4296
4297 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004298
Johannes Weiner05b84302014-08-06 16:05:59 -07004299 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004300 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04004301 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07004302 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004303 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04004304 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07004305 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004306 BUG();
4307
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004308 /* Check if a threshold crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004309 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004310 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4311
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004312 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004313
4314 /* Allocate memory for new array of thresholds */
Gustavo A. R. Silva67b80462019-03-05 15:44:05 -08004315 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004316 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004317 ret = -ENOMEM;
4318 goto unlock;
4319 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004320 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004321
4322 /* Copy thresholds (if any) to new array */
Gustavo A. R. Silvae90342e2020-10-13 16:52:36 -07004323 if (thresholds->primary)
4324 memcpy(new->entries, thresholds->primary->entries,
4325 flex_array_size(new, entries, size - 1));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004326
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004327 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004328 new->entries[size - 1].eventfd = eventfd;
4329 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004330
4331 /* Sort thresholds. Registering of new threshold isn't time-critical */
Gustavo A. R. Silva61e604e62020-10-13 16:52:39 -07004332 sort(new->entries, size, sizeof(*new->entries),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004333 compare_thresholds, NULL);
4334
4335 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004336 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004337 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07004338 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004339 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004340 * new->current_threshold will not be used until
4341 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004342 * it here.
4343 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004344 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07004345 } else
4346 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004347 }
4348
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004349 /* Free old spare buffer and save old primary buffer as spare */
4350 kfree(thresholds->spare);
4351 thresholds->spare = thresholds->primary;
4352
4353 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004354
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004355 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004356 synchronize_rcu();
4357
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004358unlock:
4359 mutex_unlock(&memcg->thresholds_lock);
4360
4361 return ret;
4362}
4363
Tejun Heo59b6f872013-11-22 18:20:43 -05004364static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004365 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004366{
Tejun Heo59b6f872013-11-22 18:20:43 -05004367 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05004368}
4369
Tejun Heo59b6f872013-11-22 18:20:43 -05004370static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004371 struct eventfd_ctx *eventfd, const char *args)
4372{
Tejun Heo59b6f872013-11-22 18:20:43 -05004373 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05004374}
4375
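/*
 * Unregister a usage threshold eventfd: rebuild the threshold array
 * into the spare buffer without the entries belonging to @eventfd and
 * publish it with rcu_assign_pointer().
 */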
Tejun Heo59b6f872013-11-22 18:20:43 -05004376static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004377 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004378{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004379 struct mem_cgroup_thresholds *thresholds;
4380 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004381 unsigned long usage;
Chunguang Xu7d366652020-03-21 18:22:10 -07004382 int i, j, size, entries;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004383
4384 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07004385
4386 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004387 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04004388 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07004389 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004390 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04004391 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07004392 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004393 BUG();
4394
Anton Vorontsov371528c2012-02-24 05:14:46 +04004395 if (!thresholds->primary)
4396 goto unlock;
4397
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004398 /* Check if a threshold crossed before removing */
4399 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4400
4401 /* Calculate new number of threshold */
Chunguang Xu7d366652020-03-21 18:22:10 -07004402 size = entries = 0;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004403 for (i = 0; i < thresholds->primary->size; i++) {
4404 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004405 size++;
Chunguang Xu7d366652020-03-21 18:22:10 -07004406 else
4407 entries++;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004408 }
4409
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004410 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004411
Chunguang Xu7d366652020-03-21 18:22:10 -07004412 /* If no items related to eventfd have been cleared, nothing to do */
4413 if (!entries)
4414 goto unlock;
4415
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004416 /* Set thresholds array to NULL if we don't have thresholds */
4417 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004418 kfree(new);
4419 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004420 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004421 }
4422
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004423 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004424
4425 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004426 new->current_threshold = -1;
4427 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4428 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004429 continue;
4430
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004431 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07004432 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004433 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004434 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004435 * until rcu_assign_pointer(), so it's safe to increment
4436 * it here.
4437 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004438 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004439 }
4440 j++;
4441 }
4442
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004443swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004444 /* Swap primary and spare array */
4445 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07004446
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004447 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004448
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004449 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004450 synchronize_rcu();
Martijn Coenen6611d8d2016-01-15 16:57:49 -08004451
4452 /* If all events are unregistered, free the spare array */
4453 if (!new) {
4454 kfree(thresholds->spare);
4455 thresholds->spare = NULL;
4456 }
Anton Vorontsov371528c2012-02-24 05:14:46 +04004457unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004458 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004459}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004460
Tejun Heo59b6f872013-11-22 18:20:43 -05004461static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004462 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004463{
Tejun Heo59b6f872013-11-22 18:20:43 -05004464 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05004465}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004466
Tejun Heo59b6f872013-11-22 18:20:43 -05004467static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004468 struct eventfd_ctx *eventfd)
4469{
Tejun Heo59b6f872013-11-22 18:20:43 -05004470 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05004471}
4472
Tejun Heo59b6f872013-11-22 18:20:43 -05004473static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004474 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004475{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004476 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004477
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004478 event = kmalloc(sizeof(*event), GFP_KERNEL);
4479 if (!event)
4480 return -ENOMEM;
4481
Michal Hocko1af8efe2011-07-26 16:08:24 -07004482 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004483
4484 event->eventfd = eventfd;
4485 list_add(&event->list, &memcg->oom_notify);
4486
4487 /* already in OOM ? */
Tejun Heoc2b42d32015-06-24 16:58:23 -07004488 if (memcg->under_oom)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004489 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07004490 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004491
4492 return 0;
4493}
4494
Tejun Heo59b6f872013-11-22 18:20:43 -05004495static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004496 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004497{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004498 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004499
Michal Hocko1af8efe2011-07-26 16:08:24 -07004500 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004501
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004502 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004503 if (ev->eventfd == eventfd) {
4504 list_del(&ev->list);
4505 kfree(ev);
4506 }
4507 }
4508
Michal Hocko1af8efe2011-07-26 16:08:24 -07004509 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004510}
4511
Tejun Heo2da8ca82013-12-05 12:28:04 -05004512static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004513{
Chris Downaa9694b2019-03-05 15:45:52 -08004514 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004515
Tejun Heo791badb2013-12-05 12:28:02 -05004516 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
Tejun Heoc2b42d32015-06-24 16:58:23 -07004517 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
Roman Gushchinfe6bdfc2018-06-14 15:28:05 -07004518 seq_printf(sf, "oom_kill %lu\n",
4519 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004520 return 0;
4521}
4522
Tejun Heo182446d2013-08-08 20:11:24 -04004523static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004524 struct cftype *cft, u64 val)
4525{
Tejun Heo182446d2013-08-08 20:11:24 -04004526 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004527
4528 /* cannot set to root cgroup and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07004529 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004530 return -EINVAL;
4531
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004532 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07004533 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004534 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07004535
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004536 return 0;
4537}
4538
Tejun Heo52ebea72015-05-22 17:13:37 -04004539#ifdef CONFIG_CGROUP_WRITEBACK
4540
Tejun Heo3a8e9ac2019-08-29 15:47:19 -07004541#include <trace/events/writeback.h>
4542
Tejun Heo841710a2015-05-22 18:23:33 -04004543static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4544{
4545 return wb_domain_init(&memcg->cgwb_domain, gfp);
4546}
4547
4548static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4549{
4550 wb_domain_exit(&memcg->cgwb_domain);
4551}
4552
Tejun Heo2529bb32015-05-22 18:23:34 -04004553static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4554{
4555 wb_domain_size_changed(&memcg->cgwb_domain);
4556}
4557
Tejun Heo841710a2015-05-22 18:23:33 -04004558struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4559{
4560 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4561
4562 if (!memcg->css.parent)
4563 return NULL;
4564
4565 return &memcg->cgwb_domain;
4566}
4567
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004568/*
4569 * idx can be of type enum memcg_stat_item or node_stat_item.
4570 * Keep in sync with memcg_exact_page().
4571 */
4572static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4573{
Chris Down871789d2019-05-14 15:46:57 -07004574 long x = atomic_long_read(&memcg->vmstats[idx]);
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004575 int cpu;
4576
4577 for_each_online_cpu(cpu)
Chris Down871789d2019-05-14 15:46:57 -07004578 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004579 if (x < 0)
4580 x = 0;
4581 return x;
4582}
4583
Tejun Heoc2aa7232015-05-22 18:23:35 -04004584/**
4585 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4586 * @wb: bdi_writeback in question
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004587 * @pfilepages: out parameter for number of file pages
4588 * @pheadroom: out parameter for number of allocatable pages according to memcg
Tejun Heoc2aa7232015-05-22 18:23:35 -04004589 * @pdirty: out parameter for number of dirty pages
4590 * @pwriteback: out parameter for number of pages under writeback
4591 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004592 * Determine the numbers of file, headroom, dirty, and writeback pages in
4593 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4594 * is a bit more involved.
Tejun Heoc2aa7232015-05-22 18:23:35 -04004595 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004596 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4597	 * headroom is calculated as the lowest headroom of the memcg itself
4598	 * and its ancestors. Note that this doesn't consider the actual amount of
4599 * available memory in the system. The caller should further cap
4600 * *@pheadroom accordingly.
Tejun Heoc2aa7232015-05-22 18:23:35 -04004601 */
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004602void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4603 unsigned long *pheadroom, unsigned long *pdirty,
4604 unsigned long *pwriteback)
Tejun Heoc2aa7232015-05-22 18:23:35 -04004605{
4606 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4607 struct mem_cgroup *parent;
Tejun Heoc2aa7232015-05-22 18:23:35 -04004608
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004609 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
Tejun Heoc2aa7232015-05-22 18:23:35 -04004610
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004611 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
Johannes Weiner21d89d12019-05-13 17:18:08 -07004612 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4613 memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004614 *pheadroom = PAGE_COUNTER_MAX;
Tejun Heoc2aa7232015-05-22 18:23:35 -04004615
Tejun Heoc2aa7232015-05-22 18:23:35 -04004616 while ((parent = parent_mem_cgroup(memcg))) {
Chris Down15b42562020-04-01 21:07:20 -07004617 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
Jakub Kicinskid1663a92020-06-01 21:49:49 -07004618 READ_ONCE(memcg->memory.high));
Tejun Heoc2aa7232015-05-22 18:23:35 -04004619 unsigned long used = page_counter_read(&memcg->memory);
4620
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004621 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
Tejun Heoc2aa7232015-05-22 18:23:35 -04004622 memcg = parent;
4623 }
Tejun Heoc2aa7232015-05-22 18:23:35 -04004624}
4625
Tejun Heo97b27822019-08-26 09:06:56 -07004626/*
4627 * Foreign dirty flushing
4628 *
4629 * There's an inherent mismatch between memcg and writeback. The former
4630 * trackes ownership per-page while the latter per-inode. This was a
4631	 * tracks ownership per-page while the latter tracks it per-inode. This was a
4632 * writeback path is complicated, may lead to higher CPU and IO overheads
4633 * and deemed unnecessary given that write-sharing an inode across
4634 * different cgroups isn't a common use-case.
4635 *
4636 * Combined with inode majority-writer ownership switching, this works well
4637 * enough in most cases but there are some pathological cases. For
4638 * example, let's say there are two cgroups A and B which keep writing to
4639 * different but confined parts of the same inode. B owns the inode and
4640 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4641 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4642 * triggering background writeback. A will be slowed down without a way to
4643 * make writeback of the dirty pages happen.
4644 *
4645	 * Conditions like the above can lead to a cgroup getting repeatedly and
4646	 * severely throttled after making some progress after each
4647	 * dirty_expire_interval while the underlying IO device is almost
4648 * completely idle.
4649 *
4650 * Solving this problem completely requires matching the ownership tracking
4651 * granularities between memcg and writeback in either direction. However,
4652 * the more egregious behaviors can be avoided by simply remembering the
4653 * most recent foreign dirtying events and initiating remote flushes on
4654 * them when local writeback isn't enough to keep the memory clean enough.
4655 *
4656	 * The following two functions implement such a mechanism. When a foreign
4657 * page - a page whose memcg and writeback ownerships don't match - is
4658 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4659 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4660 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4661 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4662 * foreign bdi_writebacks which haven't expired. Both the numbers of
4663 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4664 * limited to MEMCG_CGWB_FRN_CNT.
4665 *
4666 * The mechanism only remembers IDs and doesn't hold any object references.
4667 * As being wrong occasionally doesn't matter, updates and accesses to the
4668 * records are lockless and racy.
4669 */
4670void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4671 struct bdi_writeback *wb)
4672{
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08004673 struct mem_cgroup *memcg = page_memcg(page);
Tejun Heo97b27822019-08-26 09:06:56 -07004674 struct memcg_cgwb_frn *frn;
4675 u64 now = get_jiffies_64();
4676 u64 oldest_at = now;
4677 int oldest = -1;
4678 int i;
4679
Tejun Heo3a8e9ac2019-08-29 15:47:19 -07004680 trace_track_foreign_dirty(page, wb);
4681
Tejun Heo97b27822019-08-26 09:06:56 -07004682 /*
4683 * Pick the slot to use. If there is already a slot for @wb, keep
4684 * using it. If not replace the oldest one which isn't being
4685 * written out.
4686 */
4687 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4688 frn = &memcg->cgwb_frn[i];
4689 if (frn->bdi_id == wb->bdi->id &&
4690 frn->memcg_id == wb->memcg_css->id)
4691 break;
4692 if (time_before64(frn->at, oldest_at) &&
4693 atomic_read(&frn->done.cnt) == 1) {
4694 oldest = i;
4695 oldest_at = frn->at;
4696 }
4697 }
4698
4699 if (i < MEMCG_CGWB_FRN_CNT) {
4700 /*
4701 * Re-using an existing one. Update timestamp lazily to
4702 * avoid making the cacheline hot. We want them to be
4703 * reasonably up-to-date and significantly shorter than
4704 * dirty_expire_interval as that's what expires the record.
4705 * Use the shorter of 1s and dirty_expire_interval / 8.
4706 */
4707 unsigned long update_intv =
4708 min_t(unsigned long, HZ,
4709 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4710
4711 if (time_before64(frn->at, now - update_intv))
4712 frn->at = now;
4713 } else if (oldest >= 0) {
4714 /* replace the oldest free one */
4715 frn = &memcg->cgwb_frn[oldest];
4716 frn->bdi_id = wb->bdi->id;
4717 frn->memcg_id = wb->memcg_css->id;
4718 frn->at = now;
4719 }
4720}
4721
4722/* issue foreign writeback flushes for recorded foreign dirtying events */
4723void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4724{
4725 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4726 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4727 u64 now = jiffies_64;
4728 int i;
4729
4730 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4731 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4732
4733 /*
4734 * If the record is older than dirty_expire_interval,
4735 * writeback on it has already started. No need to kick it
4736 * off again. Also, don't start a new one if there's
4737 * already one in flight.
4738 */
4739 if (time_after64(frn->at, now - intv) &&
4740 atomic_read(&frn->done.cnt) == 1) {
4741 frn->at = 0;
Tejun Heo3a8e9ac2019-08-29 15:47:19 -07004742 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
Tejun Heo97b27822019-08-26 09:06:56 -07004743 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4744 WB_REASON_FOREIGN_FLUSH,
4745 &frn->done);
4746 }
4747 }
4748}
4749
Tejun Heo841710a2015-05-22 18:23:33 -04004750#else /* CONFIG_CGROUP_WRITEBACK */
4751
4752static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4753{
4754 return 0;
4755}
4756
4757static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4758{
4759}
4760
Tejun Heo2529bb32015-05-22 18:23:34 -04004761static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4762{
4763}
4764
Tejun Heo52ebea72015-05-22 17:13:37 -04004765#endif /* CONFIG_CGROUP_WRITEBACK */
4766
Tejun Heo79bd9812013-11-22 18:20:42 -05004767/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004768 * DO NOT USE IN NEW FILES.
4769 *
4770 * "cgroup.event_control" implementation.
4771 *
4772 * This is way over-engineered. It tries to support fully configurable
4773	 * events for each user. Such a level of flexibility is completely
4774	 * unnecessary, especially in light of the planned unified hierarchy.
4775 *
4776 * Please deprecate this and replace with something simpler if at all
4777 * possible.
4778 */
4779
4780/*
Tejun Heo79bd9812013-11-22 18:20:42 -05004781 * Unregister event and free resources.
4782 *
4783	 * Gets called from a workqueue.
4784 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004785static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05004786{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004787 struct mem_cgroup_event *event =
4788 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05004789 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004790
4791 remove_wait_queue(event->wqh, &event->wait);
4792
Tejun Heo59b6f872013-11-22 18:20:43 -05004793 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05004794
4795 /* Notify userspace the event is going away. */
4796 eventfd_signal(event->eventfd, 1);
4797
4798 eventfd_ctx_put(event->eventfd);
4799 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05004800 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004801}
4802
4803/*
Linus Torvaldsa9a08842018-02-11 14:34:03 -08004804 * Gets called on EPOLLHUP on eventfd when user closes it.
Tejun Heo79bd9812013-11-22 18:20:42 -05004805 *
4806 * Called with wqh->lock held and interrupts disabled.
4807 */
Ingo Molnarac6424b2017-06-20 12:06:13 +02004808static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
Tejun Heo3bc942f2013-11-22 18:20:44 -05004809 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05004810{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004811 struct mem_cgroup_event *event =
4812 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05004813 struct mem_cgroup *memcg = event->memcg;
Al Viro3ad6f932017-07-03 20:14:56 -04004814 __poll_t flags = key_to_poll(key);
Tejun Heo79bd9812013-11-22 18:20:42 -05004815
Linus Torvaldsa9a08842018-02-11 14:34:03 -08004816 if (flags & EPOLLHUP) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004817 /*
4818 * If the event has been detached at cgroup removal, we
4819	 * can simply return knowing the other side will clean up
4820 * for us.
4821 *
4822 * We can't race against event freeing since the other
4823 * side will require wqh->lock via remove_wait_queue(),
4824 * which we hold.
4825 */
Tejun Heofba94802013-11-22 18:20:43 -05004826 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004827 if (!list_empty(&event->list)) {
4828 list_del_init(&event->list);
4829 /*
4830			 * We are in atomic context, but memcg_event_remove()
4831			 * may sleep, so we have to call it from a workqueue.
4832 */
4833 schedule_work(&event->remove);
4834 }
Tejun Heofba94802013-11-22 18:20:43 -05004835 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004836 }
4837
4838 return 0;
4839}
4840
Tejun Heo3bc942f2013-11-22 18:20:44 -05004841static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05004842 wait_queue_head_t *wqh, poll_table *pt)
4843{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004844 struct mem_cgroup_event *event =
4845 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004846
4847 event->wqh = wqh;
4848 add_wait_queue(wqh, &event->wait);
4849}
4850
4851/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004852 * DO NOT USE IN NEW FILES.
4853 *
Tejun Heo79bd9812013-11-22 18:20:42 -05004854 * Parse input and register a new cgroup event handler.
4855 *
4856 * Input must be in the format '<event_fd> <control_fd> <args>'.
4857 * Interpretation of <args> is defined by the control file implementation.
4858 */
Tejun Heo451af502014-05-13 12:16:21 -04004859static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4860 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05004861{
Tejun Heo451af502014-05-13 12:16:21 -04004862 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05004863 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004864 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05004865 struct cgroup_subsys_state *cfile_css;
4866 unsigned int efd, cfd;
4867 struct fd efile;
4868 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05004869 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05004870 char *endp;
4871 int ret;
4872
Tejun Heo451af502014-05-13 12:16:21 -04004873 buf = strstrip(buf);
4874
4875 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004876 if (*endp != ' ')
4877 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004878 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004879
Tejun Heo451af502014-05-13 12:16:21 -04004880 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004881 if ((*endp != ' ') && (*endp != '\0'))
4882 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004883 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004884
4885 event = kzalloc(sizeof(*event), GFP_KERNEL);
4886 if (!event)
4887 return -ENOMEM;
4888
Tejun Heo59b6f872013-11-22 18:20:43 -05004889 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004890 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004891 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4892 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4893 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05004894
4895 efile = fdget(efd);
4896 if (!efile.file) {
4897 ret = -EBADF;
4898 goto out_kfree;
4899 }
4900
4901 event->eventfd = eventfd_ctx_fileget(efile.file);
4902 if (IS_ERR(event->eventfd)) {
4903 ret = PTR_ERR(event->eventfd);
4904 goto out_put_efile;
4905 }
4906
4907 cfile = fdget(cfd);
4908 if (!cfile.file) {
4909 ret = -EBADF;
4910 goto out_put_eventfd;
4911 }
4912
4913	/* the process needs read permission on the control file */
4914 /* AV: shouldn't we check that it's been opened for read instead? */
Christian Brauner02f92b32021-01-21 14:19:22 +01004915 ret = file_permission(cfile.file, MAY_READ);
Tejun Heo79bd9812013-11-22 18:20:42 -05004916 if (ret < 0)
4917 goto out_put_cfile;
4918
Tejun Heo79bd9812013-11-22 18:20:42 -05004919 /*
Tejun Heofba94802013-11-22 18:20:43 -05004920 * Determine the event callbacks and set them in @event. This used
4921 * to be done via struct cftype but cgroup core no longer knows
4922 * about these events. The following is crude but the whole thing
4923 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05004924 *
4925 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05004926 */
Al Virob5830432014-10-31 01:22:04 -04004927 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05004928
4929 if (!strcmp(name, "memory.usage_in_bytes")) {
4930 event->register_event = mem_cgroup_usage_register_event;
4931 event->unregister_event = mem_cgroup_usage_unregister_event;
4932 } else if (!strcmp(name, "memory.oom_control")) {
4933 event->register_event = mem_cgroup_oom_register_event;
4934 event->unregister_event = mem_cgroup_oom_unregister_event;
4935 } else if (!strcmp(name, "memory.pressure_level")) {
4936 event->register_event = vmpressure_register_event;
4937 event->unregister_event = vmpressure_unregister_event;
4938 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05004939 event->register_event = memsw_cgroup_usage_register_event;
4940 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05004941 } else {
4942 ret = -EINVAL;
4943 goto out_put_cfile;
4944 }
4945
4946 /*
Tejun Heob5557c42013-11-22 18:20:42 -05004947	 * Verify that @cfile belongs to @css. Also, remaining events are
4948 * automatically removed on cgroup destruction but the removal is
4949 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05004950 */
Al Virob5830432014-10-31 01:22:04 -04004951 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04004952 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05004953 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05004954 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05004955 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05004956 if (cfile_css != css) {
4957 css_put(cfile_css);
4958 goto out_put_cfile;
4959 }
Tejun Heo79bd9812013-11-22 18:20:42 -05004960
Tejun Heo451af502014-05-13 12:16:21 -04004961 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05004962 if (ret)
4963 goto out_put_css;
4964
Christoph Hellwig9965ed172018-03-05 07:26:05 -08004965 vfs_poll(efile.file, &event->pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004966
Tejun Heofba94802013-11-22 18:20:43 -05004967 spin_lock(&memcg->event_list_lock);
4968 list_add(&event->list, &memcg->event_list);
4969 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004970
4971 fdput(cfile);
4972 fdput(efile);
4973
Tejun Heo451af502014-05-13 12:16:21 -04004974 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05004975
4976out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05004977 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004978out_put_cfile:
4979 fdput(cfile);
4980out_put_eventfd:
4981 eventfd_ctx_put(event->eventfd);
4982out_put_efile:
4983 fdput(efile);
4984out_kfree:
4985 kfree(event);
4986
4987 return ret;
4988}
4989
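/*
 * A minimal userspace sketch of how the legacy event interface above is
 * typically driven, assuming a cgroup v1 memory hierarchy mounted at
 * /sys/fs/cgroup/memory; the group name and the 64M threshold are examples
 * only and error handling is omitted:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *			O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 64ULL << 20);
 *	write(ecfd, buf, strlen(buf));
 *	read(efd, &ticks, sizeof(ticks));	// blocks until the threshold fires
 *
 * memcg_write_event_control() parses that "<event_fd> <control_fd> <args>"
 * string, takes its own references on the eventfd and on @css, and passes
 * <args> on to the register_event() callback picked from the file name.
 */
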
Johannes Weiner241994ed2015-02-11 15:26:06 -08004990static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004991 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004992 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004993 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004994 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004995 },
4996 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004997 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004998 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004999 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005000 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07005001 },
5002 {
Balbir Singh0eea1032008-02-07 00:13:57 -08005003 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005004 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005005 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005006 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005007 },
5008 {
Balbir Singh296c81d2009-09-23 15:56:36 -07005009 .name = "soft_limit_in_bytes",
5010 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005011 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005012 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07005013 },
5014 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005015 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005016 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04005017 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005018 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005019 },
Balbir Singh8697d332008-02-07 00:13:59 -08005020 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005021 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005022 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005023 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08005024 {
5025 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04005026 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08005027 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08005028 {
5029 .name = "use_hierarchy",
5030 .write_u64 = mem_cgroup_hierarchy_write,
5031 .read_u64 = mem_cgroup_hierarchy_read,
5032 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005033 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05005034 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04005035 .write = memcg_write_event_control,
Tejun Heo7dbdb192015-09-18 17:54:23 -04005036 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
Tejun Heo79bd9812013-11-22 18:20:42 -05005037 },
5038 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005039 .name = "swappiness",
5040 .read_u64 = mem_cgroup_swappiness_read,
5041 .write_u64 = mem_cgroup_swappiness_write,
5042 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005043 {
5044 .name = "move_charge_at_immigrate",
5045 .read_u64 = mem_cgroup_move_charge_read,
5046 .write_u64 = mem_cgroup_move_charge_write,
5047 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005048 {
5049 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005050 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005051 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005052 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5053 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07005054 {
5055 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07005056 },
Ying Han406eb0c2011-05-26 16:25:37 -07005057#ifdef CONFIG_NUMA
5058 {
5059 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005060 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07005061 },
5062#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08005063 {
5064 .name = "kmem.limit_in_bytes",
5065 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005066 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005067 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005068 },
5069 {
5070 .name = "kmem.usage_in_bytes",
5071 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05005072 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005073 },
5074 {
5075 .name = "kmem.failcnt",
5076 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04005077 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005078 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005079 },
5080 {
5081 .name = "kmem.max_usage_in_bytes",
5082 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04005083 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005084 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005085 },
Yafang Shaoa87425a2020-04-01 21:06:30 -07005086#if defined(CONFIG_MEMCG_KMEM) && \
5087 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
Glauber Costa749c5412012-12-18 14:23:01 -08005088 {
5089 .name = "kmem.slabinfo",
Vladimir Davydovb0475012014-12-10 15:44:19 -08005090 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08005091 },
5092#endif
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08005093 {
5094 .name = "kmem.tcp.limit_in_bytes",
5095 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5096 .write = mem_cgroup_write,
5097 .read_u64 = mem_cgroup_read_u64,
5098 },
5099 {
5100 .name = "kmem.tcp.usage_in_bytes",
5101 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5102 .read_u64 = mem_cgroup_read_u64,
5103 },
5104 {
5105 .name = "kmem.tcp.failcnt",
5106 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5107 .write = mem_cgroup_reset,
5108 .read_u64 = mem_cgroup_read_u64,
5109 },
5110 {
5111 .name = "kmem.tcp.max_usage_in_bytes",
5112 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5113 .write = mem_cgroup_reset,
5114 .read_u64 = mem_cgroup_read_u64,
5115 },
Tejun Heo6bc10342012-04-01 12:09:55 -07005116 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07005117};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005118
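/*
 * The table above is what a cgroup v1 ("legacy") memory hierarchy exposes
 * as control files; as a rough illustration (group name and value are
 * examples only):
 *
 *	mkdir /sys/fs/cgroup/memory/grp
 *	echo 512M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *	cat /sys/fs/cgroup/memory/grp/memory.usage_in_bytes
 *
 * Each cftype entry maps a file name to its handlers, and MEMFILE_PRIVATE()
 * encodes which counter (_MEM, _KMEM, _TCP, ...) and which field (RES_USAGE,
 * RES_LIMIT, RES_MAX_USAGE, RES_FAILCNT, ...) the generic read and write
 * helpers operate on.
 */
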
Johannes Weiner73f576c2016-07-20 15:44:57 -07005119/*
5120 * Private memory cgroup IDR
5121 *
5122 * Swap-out records and page cache shadow entries need to store memcg
5123 * references in constrained space, so we maintain an ID space that is
5124	 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5125 * memory-controlled cgroups to 64k.
5126 *
Ethon Paulb8f29352020-06-04 16:49:28 -07005127 * However, there usually are many references to the offline CSS after
Johannes Weiner73f576c2016-07-20 15:44:57 -07005128 * the cgroup has been destroyed, such as page cache or reclaimable
5129 * slab objects, that don't need to hang on to the ID. We want to keep
5130 * those dead CSS from occupying IDs, or we might quickly exhaust the
5131 * relatively small ID space and prevent the creation of new cgroups
5132 * even when there are much fewer than 64k cgroups - possibly none.
5133 *
5134 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5135 * be freed and recycled when it's no longer needed, which is usually
5136 * when the CSS is offlined.
5137 *
5138 * The only exception to that are records of swapped out tmpfs/shmem
5139 * pages that need to be attributed to live ancestors on swapin. But
5140 * those references are manageable from userspace.
5141 */
5142
5143static DEFINE_IDR(mem_cgroup_idr);
5144
Kirill Tkhai7e97de02018-08-02 15:36:01 -07005145static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5146{
5147 if (memcg->id.id > 0) {
5148 idr_remove(&mem_cgroup_idr, memcg->id.id);
5149 memcg->id.id = 0;
5150 }
5151}
5152
Vincenzo Frascinoc1514c02020-04-01 21:07:13 -07005153static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5154 unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07005155{
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07005156 refcount_add(n, &memcg->id.ref);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005157}
5158
Vladimir Davydov615d66c2016-08-11 15:33:03 -07005159static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07005160{
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07005161 if (refcount_sub_and_test(n, &memcg->id.ref)) {
Kirill Tkhai7e97de02018-08-02 15:36:01 -07005162 mem_cgroup_id_remove(memcg);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005163
5164 /* Memcg ID pins CSS */
5165 css_put(&memcg->css);
5166 }
5167}
5168
Vladimir Davydov615d66c2016-08-11 15:33:03 -07005169static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5170{
5171 mem_cgroup_id_put_many(memcg, 1);
5172}
5173
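/*
 * The _get_many()/_put_many() helpers above let callers take or drop many
 * ID references at once; the swap path uses this for batches of swap
 * entries, and __mem_cgroup_clear_mc() further below drops the mc.moved_swap
 * references in a single mem_cgroup_id_put_many() call.
 */
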
Johannes Weiner73f576c2016-07-20 15:44:57 -07005174/**
5175 * mem_cgroup_from_id - look up a memcg from a memcg id
5176 * @id: the memcg id to look up
5177 *
5178 * Caller must hold rcu_read_lock().
5179 */
5180struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5181{
5182 WARN_ON_ONCE(!rcu_read_lock_held());
5183 return idr_find(&mem_cgroup_idr, id);
5184}
5185
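/*
 * A sketch of the intended lookup pattern (the caller and @id here are
 * hypothetical): the ID only pins the IDR slot, not the memcg itself, so a
 * caller that wants to use the result beyond the RCU section must still
 * take a css reference, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */
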
Mel Gormanef8f2322016-07-28 15:46:05 -07005186static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005187{
5188 struct mem_cgroup_per_node *pn;
Mel Gormanef8f2322016-07-28 15:46:05 -07005189 int tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08005190 /*
5191	 * This routine is called against all possible nodes.
5192	 * But it's a BUG to call kmalloc() against an offline node.
5193	 *
5194	 * TODO: this routine can waste a lot of memory for nodes which will
5195	 * never be onlined. It would be better to use a memory hotplug
5196	 * callback function.
5197 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07005198 if (!node_state(node, N_NORMAL_MEMORY))
5199 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08005200 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005201 if (!pn)
5202 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08005203
Roman Gushchin3e38e0a2020-08-11 18:30:25 -07005204 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5205 GFP_KERNEL_ACCOUNT);
Johannes Weiner815744d2019-06-13 15:55:46 -07005206 if (!pn->lruvec_stat_local) {
5207 kfree(pn);
5208 return 1;
5209 }
5210
Roman Gushchin3e38e0a2020-08-11 18:30:25 -07005211 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
5212 GFP_KERNEL_ACCOUNT);
Johannes Weinera983b5e2018-01-31 16:16:45 -08005213 if (!pn->lruvec_stat_cpu) {
Johannes Weiner815744d2019-06-13 15:55:46 -07005214 free_percpu(pn->lruvec_stat_local);
Johannes Weiner00f3ca22017-07-06 15:40:52 -07005215 kfree(pn);
5216 return 1;
5217 }
5218
Mel Gormanef8f2322016-07-28 15:46:05 -07005219 lruvec_init(&pn->lruvec);
5220 pn->usage_in_excess = 0;
5221 pn->on_tree = false;
5222 pn->memcg = memcg;
5223
Johannes Weiner54f72fe2013-07-08 15:59:49 -07005224 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005225 return 0;
5226}
5227
Mel Gormanef8f2322016-07-28 15:46:05 -07005228static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08005229{
Johannes Weiner00f3ca22017-07-06 15:40:52 -07005230 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5231
Michal Hocko4eaf4312018-04-10 16:29:52 -07005232 if (!pn)
5233 return;
5234
Johannes Weinera983b5e2018-01-31 16:16:45 -08005235 free_percpu(pn->lruvec_stat_cpu);
Johannes Weiner815744d2019-06-13 15:55:46 -07005236 free_percpu(pn->lruvec_stat_local);
Johannes Weiner00f3ca22017-07-06 15:40:52 -07005237 kfree(pn);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08005238}
5239
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08005240static void __mem_cgroup_free(struct mem_cgroup *memcg)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005241{
5242 int node;
5243
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005244 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07005245 free_mem_cgroup_per_node_info(memcg, node);
Chris Down871789d2019-05-14 15:46:57 -07005246 free_percpu(memcg->vmstats_percpu);
Johannes Weiner815744d2019-06-13 15:55:46 -07005247 free_percpu(memcg->vmstats_local);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005248 kfree(memcg);
5249}
5250
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08005251static void mem_cgroup_free(struct mem_cgroup *memcg)
5252{
5253 memcg_wb_domain_exit(memcg);
Shakeel Butt7961eee2019-11-05 21:16:21 -08005254 /*
5255	 * Flush percpu vmstats and vmevents to guarantee that the values are
5256	 * correct at the parent's and all ancestors' levels.
5257 */
Roman Gushchin4a87e2a2020-01-13 16:29:16 -08005258 memcg_flush_percpu_vmstats(memcg);
Shakeel Butt7961eee2019-11-05 21:16:21 -08005259 memcg_flush_percpu_vmevents(memcg);
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08005260 __mem_cgroup_free(memcg);
5261}
5262
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07005263static struct mem_cgroup *mem_cgroup_alloc(void)
5264{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07005265 struct mem_cgroup *memcg;
Alexey Dobriyanb9726c22019-03-05 15:48:26 -08005266 unsigned int size;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005267 int node;
Tejun Heo97b27822019-08-26 09:06:56 -07005268 int __maybe_unused i;
Yafang Shao11d67612020-05-07 18:35:43 -07005269 long error = -ENOMEM;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07005270
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08005271 size = sizeof(struct mem_cgroup);
5272 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07005273
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08005274 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07005275 if (!memcg)
Yafang Shao11d67612020-05-07 18:35:43 -07005276 return ERR_PTR(error);
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07005277
Johannes Weiner73f576c2016-07-20 15:44:57 -07005278 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5279 1, MEM_CGROUP_ID_MAX,
5280 GFP_KERNEL);
Yafang Shao11d67612020-05-07 18:35:43 -07005281 if (memcg->id.id < 0) {
5282 error = memcg->id.id;
Johannes Weiner73f576c2016-07-20 15:44:57 -07005283 goto fail;
Yafang Shao11d67612020-05-07 18:35:43 -07005284 }
Johannes Weiner73f576c2016-07-20 15:44:57 -07005285
Roman Gushchin3e38e0a2020-08-11 18:30:25 -07005286 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5287 GFP_KERNEL_ACCOUNT);
Johannes Weiner815744d2019-06-13 15:55:46 -07005288 if (!memcg->vmstats_local)
5289 goto fail;
5290
Roman Gushchin3e38e0a2020-08-11 18:30:25 -07005291 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5292 GFP_KERNEL_ACCOUNT);
Chris Down871789d2019-05-14 15:46:57 -07005293 if (!memcg->vmstats_percpu)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005294 goto fail;
Pavel Emelianov78fb7462008-02-07 00:13:51 -08005295
Bob Liu3ed28fa2012-01-12 17:19:04 -08005296 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07005297 if (alloc_mem_cgroup_per_node_info(memcg, node))
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005298 goto fail;
Balbir Singhf64c3f52009-09-23 15:56:37 -07005299
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005300 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5301 goto fail;
Balbir Singh28dbc4b2009-01-07 18:08:05 -08005302
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005303 INIT_WORK(&memcg->high_work, high_work_func);
Glauber Costad142e3e2013-02-22 16:34:52 -08005304 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08005305 mutex_init(&memcg->thresholds_lock);
5306 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07005307 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05005308 INIT_LIST_HEAD(&memcg->event_list);
5309 spin_lock_init(&memcg->event_list_lock);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005310 memcg->socket_pressure = jiffies;
Kirill Tkhai84c07d12018-08-17 15:47:25 -07005311#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydov900a38f2014-12-12 16:55:10 -08005312 memcg->kmemcg_id = -1;
Roman Gushchinbf4f0592020-08-06 23:20:49 -07005313 INIT_LIST_HEAD(&memcg->objcg_list);
Vladimir Davydov900a38f2014-12-12 16:55:10 -08005314#endif
Tejun Heo52ebea72015-05-22 17:13:37 -04005315#ifdef CONFIG_CGROUP_WRITEBACK
5316 INIT_LIST_HEAD(&memcg->cgwb_list);
Tejun Heo97b27822019-08-26 09:06:56 -07005317 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5318 memcg->cgwb_frn[i].done =
5319 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
Tejun Heo52ebea72015-05-22 17:13:37 -04005320#endif
Yang Shi87eaceb2019-09-23 15:38:15 -07005321#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5322 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5323 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5324 memcg->deferred_split_queue.split_queue_len = 0;
5325#endif
Johannes Weiner73f576c2016-07-20 15:44:57 -07005326 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005327 return memcg;
5328fail:
Kirill Tkhai7e97de02018-08-02 15:36:01 -07005329 mem_cgroup_id_remove(memcg);
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08005330 __mem_cgroup_free(memcg);
Yafang Shao11d67612020-05-07 18:35:43 -07005331 return ERR_PTR(error);
Glauber Costad142e3e2013-02-22 16:34:52 -08005332}
5333
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005334static struct cgroup_subsys_state * __ref
5335mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Glauber Costad142e3e2013-02-22 16:34:52 -08005336{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005337 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
Roman Gushchinb87d8ce2020-10-17 16:13:40 -07005338 struct mem_cgroup *memcg, *old_memcg;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005339 long error = -ENOMEM;
Glauber Costad142e3e2013-02-22 16:34:52 -08005340
Roman Gushchinb87d8ce2020-10-17 16:13:40 -07005341 old_memcg = set_active_memcg(parent);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005342 memcg = mem_cgroup_alloc();
Roman Gushchinb87d8ce2020-10-17 16:13:40 -07005343 set_active_memcg(old_memcg);
Yafang Shao11d67612020-05-07 18:35:43 -07005344 if (IS_ERR(memcg))
5345 return ERR_CAST(memcg);
Li Zefan4219b2d2013-09-23 16:56:29 +08005346
Jakub Kicinskid1663a92020-06-01 21:49:49 -07005347 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005348 memcg->soft_limit = PAGE_COUNTER_MAX;
Jakub Kicinski4b82ab42020-06-01 21:49:52 -07005349 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005350 if (parent) {
5351 memcg->swappiness = mem_cgroup_swappiness(parent);
5352 memcg->oom_kill_disable = parent->oom_kill_disable;
Roman Gushchinbef86202020-12-14 19:06:49 -08005353
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005354 page_counter_init(&memcg->memory, &parent->memory);
Vladimir Davydov37e84352016-01-20 15:02:56 -08005355 page_counter_init(&memcg->swap, &parent->swap);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005356 page_counter_init(&memcg->kmem, &parent->kmem);
Johannes Weiner0db15292016-01-20 15:02:50 -08005357 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
Balbir Singh18f59ea2009-01-07 18:08:07 -08005358 } else {
Roman Gushchinbef86202020-12-14 19:06:49 -08005359 page_counter_init(&memcg->memory, NULL);
5360 page_counter_init(&memcg->swap, NULL);
5361 page_counter_init(&memcg->kmem, NULL);
5362 page_counter_init(&memcg->tcpmem, NULL);
Vladimir Davydovd6441632014-01-23 15:53:09 -08005363
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005364 root_mem_cgroup = memcg;
5365 return &memcg->css;
5366 }
5367
Roman Gushchinbef86202020-12-14 19:06:49 -08005368 /* The following stuff does not apply to the root */
Vladimir Davydovb313aee2016-03-17 14:18:27 -07005369 error = memcg_online_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005370 if (error)
5371 goto fail;
Johannes Weiner127424c2016-01-20 15:02:32 -08005372
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005373 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08005374 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005375
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005376 return &memcg->css;
5377fail:
Kirill Tkhai7e97de02018-08-02 15:36:01 -07005378 mem_cgroup_id_remove(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005379 mem_cgroup_free(memcg);
Yafang Shao11d67612020-05-07 18:35:43 -07005380 return ERR_PTR(error);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005381}
5382
Johannes Weiner73f576c2016-07-20 15:44:57 -07005383static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005384{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07005385 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5386
Kirill Tkhai0a4465d2018-08-17 15:47:37 -07005387 /*
5388 * A memcg must be visible for memcg_expand_shrinker_maps()
5389 * by the time the maps are allocated. So, we allocate maps
5390 * here, when for_each_mem_cgroup() can't skip it.
5391 */
5392 if (memcg_alloc_shrinker_maps(memcg)) {
5393 mem_cgroup_id_remove(memcg);
5394 return -ENOMEM;
5395 }
5396
Johannes Weiner73f576c2016-07-20 15:44:57 -07005397 /* Online state pins memcg ID, memcg ID pins CSS */
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07005398 refcount_set(&memcg->id.ref, 1);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005399 css_get(css);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07005400 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005401}
5402
Tejun Heoeb954192013-08-08 20:11:23 -04005403static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08005404{
Tejun Heoeb954192013-08-08 20:11:23 -04005405 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05005406 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05005407
5408 /*
5409 * Unregister events and notify userspace.
5410	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5411	 * directory to avoid a race between userspace and kernel space.
5412 */
Tejun Heofba94802013-11-22 18:20:43 -05005413 spin_lock(&memcg->event_list_lock);
5414 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05005415 list_del_init(&event->list);
5416 schedule_work(&event->remove);
5417 }
Tejun Heofba94802013-11-22 18:20:43 -05005418 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07005419
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005420 page_counter_set_min(&memcg->memory, 0);
Roman Gushchin23067152018-06-07 17:06:22 -07005421 page_counter_set_low(&memcg->memory, 0);
Roman Gushchin63677c742017-09-06 16:21:47 -07005422
Johannes Weiner567e9ab2016-01-20 15:02:24 -08005423 memcg_offline_kmem(memcg);
Tejun Heo52ebea72015-05-22 17:13:37 -04005424 wb_memcg_offline(memcg);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005425
Roman Gushchin591edfb2018-10-26 15:03:23 -07005426 drain_all_stock(memcg);
5427
Johannes Weiner73f576c2016-07-20 15:44:57 -07005428 mem_cgroup_id_put(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08005429}
5430
Vladimir Davydov6df38682015-12-29 14:54:10 -08005431static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5432{
5433 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5434
5435 invalidate_reclaim_iterators(memcg);
5436}
5437
Tejun Heoeb954192013-08-08 20:11:23 -04005438static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005439{
Tejun Heoeb954192013-08-08 20:11:23 -04005440 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo97b27822019-08-26 09:06:56 -07005441 int __maybe_unused i;
Daisuke Nishimurac268e992009-01-15 13:51:13 -08005442
Tejun Heo97b27822019-08-26 09:06:56 -07005443#ifdef CONFIG_CGROUP_WRITEBACK
5444 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5445 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5446#endif
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005447 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08005448 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08005449
Johannes Weiner0db15292016-01-20 15:02:50 -08005450 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08005451 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08005452
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005453 vmpressure_cleanup(&memcg->vmpressure);
5454 cancel_work_sync(&memcg->high_work);
5455 mem_cgroup_remove_from_trees(memcg);
Kirill Tkhai0a4465d2018-08-17 15:47:37 -07005456 memcg_free_shrinker_maps(memcg);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005457 memcg_free_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08005458 mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005459}
5460
Tejun Heo1ced9532014-07-08 18:02:57 -04005461/**
5462 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5463 * @css: the target css
5464 *
5465 * Reset the states of the mem_cgroup associated with @css. This is
5466 * invoked when the userland requests disabling on the default hierarchy
5467 * but the memcg is pinned through dependency. The memcg should stop
5468 * applying policies and should revert to the vanilla state as it may be
5469 * made visible again.
5470 *
5471 * The current implementation only resets the essential configurations.
5472 * This needs to be expanded to cover all the visible parts.
5473 */
5474static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5475{
5476 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5477
Roman Gushchinbbec2e12018-06-07 17:06:18 -07005478 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5479 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
Roman Gushchinbbec2e12018-06-07 17:06:18 -07005480 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5481 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005482 page_counter_set_min(&memcg->memory, 0);
Roman Gushchin23067152018-06-07 17:06:22 -07005483 page_counter_set_low(&memcg->memory, 0);
Jakub Kicinskid1663a92020-06-01 21:49:49 -07005484 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
Johannes Weiner24d404d2015-01-08 14:32:35 -08005485 memcg->soft_limit = PAGE_COUNTER_MAX;
Jakub Kicinski4b82ab42020-06-01 21:49:52 -07005486 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
Tejun Heo2529bb32015-05-22 18:23:34 -04005487 memcg_wb_domain_size_changed(memcg);
Tejun Heo1ced9532014-07-08 18:02:57 -04005488}
5489
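/*
 * A note on when the reset path above runs: on the default hierarchy this
 * corresponds to, for example, writing "-memory" to the parent's
 * cgroup.subtree_control while this css is still pinned through a
 * dependency; cgroup core then invokes ->css_reset() so the lingering
 * memcg stops enforcing limits and protections until it is re-enabled or
 * finally released.
 */
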
Daisuke Nishimura02491442010-03-10 15:22:17 -08005490#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005491/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005492static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005493{
Johannes Weiner05b84302014-08-06 16:05:59 -07005494 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07005495
Mel Gormand0164ad2015-11-06 16:28:21 -08005496 /* Try a single bulk charge without reclaim first, kswapd may wake */
5497 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07005498 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005499 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005500 return ret;
5501 }
Johannes Weiner9476db92014-08-06 16:05:55 -07005502
David Rientjes36745342017-01-24 15:18:10 -08005503 /* Try charges one by one with reclaim, but do not retry */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005504 while (count--) {
David Rientjes36745342017-01-24 15:18:10 -08005505 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08005506 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08005507 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005508 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07005509 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005510 }
Johannes Weiner9476db92014-08-06 16:05:55 -07005511 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005512}
5513
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005514union mc_target {
5515 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005516 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005517};
5518
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005519enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005520 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005521 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08005522 MC_TARGET_SWAP,
Jérôme Glissec733a822017-09-08 16:11:54 -07005523 MC_TARGET_DEVICE,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005524};
5525
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005526static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5527 unsigned long addr, pte_t ptent)
5528{
Christoph Hellwig25b29952019-06-13 22:50:49 +02005529 struct page *page = vm_normal_page(vma, addr, ptent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005530
5531 if (!page || !page_mapped(page))
5532 return NULL;
5533 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005534 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005535 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005536 } else {
5537 if (!(mc.flags & MOVE_FILE))
5538 return NULL;
5539 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005540 if (!get_page_unless_zero(page))
5541 return NULL;
5542
5543 return page;
5544}
5545
Jérôme Glissec733a822017-09-08 16:11:54 -07005546#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005547static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07005548 pte_t ptent, swp_entry_t *entry)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005549{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005550 struct page *page = NULL;
5551 swp_entry_t ent = pte_to_swp_entry(ptent);
5552
Ralph Campbell9a137152020-10-13 16:53:13 -07005553 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005554 return NULL;
Jérôme Glissec733a822017-09-08 16:11:54 -07005555
5556 /*
5557	 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
5558	 * belonging to a device; because they are not accessible by the CPU,
5559	 * they are stored as special swap entries in the CPU page table.
5560 */
5561 if (is_device_private_entry(ent)) {
5562 page = device_private_entry_to_page(ent);
5563 /*
5564		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5565		 * a refcount of 1 when free (unlike a normal page).
5566 */
5567 if (!page_ref_add_unless(page, 1, 1))
5568 return NULL;
5569 return page;
5570 }
5571
Ralph Campbell9a137152020-10-13 16:53:13 -07005572 if (non_swap_entry(ent))
5573 return NULL;
5574
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07005575 /*
5576	 * Because lookup_swap_cache() updates some statistics counters,
5577 * we call find_get_page() with swapper_space directly.
5578 */
Huang Yingf6ab1f72016-10-07 17:00:21 -07005579 page = find_get_page(swap_address_space(ent), swp_offset(ent));
Johannes Weiner2d1c4982020-06-03 16:02:14 -07005580 entry->val = ent.val;
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005581
5582 return page;
5583}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07005584#else
5585static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07005586 pte_t ptent, swp_entry_t *entry)
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07005587{
5588 return NULL;
5589}
5590#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005591
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005592static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5593 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5594{
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005595 if (!vma->vm_file) /* anonymous vma */
5596 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005597 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005598 return NULL;
5599
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005600 /* page is moved even if it's not RSS of this task(page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07005601 /* shmem/tmpfs may report page out on swap: account for that too. */
Matthew Wilcox (Oracle)f5df8632020-10-13 16:51:21 -07005602 return find_get_incore_page(vma->vm_file->f_mapping,
5603 linear_page_index(vma, addr));
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005604}
5605
Chen Gangb1b0dea2015-04-14 15:47:35 -07005606/**
5607 * mem_cgroup_move_account - move account of the page
5608 * @page: the page
Li RongQing25843c22016-07-26 15:26:56 -07005609 * @compound: charge the page as compound or small page
Chen Gangb1b0dea2015-04-14 15:47:35 -07005610 * @from: mem_cgroup which the page is moved from.
5611 * @to: mem_cgroup which the page is moved to. @from != @to.
5612 *
Kirill A. Shutemov3ac808f2016-01-15 16:53:07 -08005613 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
Chen Gangb1b0dea2015-04-14 15:47:35 -07005614 *
5615 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5616 * from old cgroup.
5617 */
5618static int mem_cgroup_move_account(struct page *page,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005619 bool compound,
Chen Gangb1b0dea2015-04-14 15:47:35 -07005620 struct mem_cgroup *from,
5621 struct mem_cgroup *to)
5622{
Konstantin Khlebnikovae8af432019-10-18 20:20:11 -07005623 struct lruvec *from_vec, *to_vec;
5624 struct pglist_data *pgdat;
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07005625 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
Chen Gangb1b0dea2015-04-14 15:47:35 -07005626 int ret;
5627
5628 VM_BUG_ON(from == to);
5629 VM_BUG_ON_PAGE(PageLRU(page), page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005630 VM_BUG_ON(compound && !PageTransHuge(page));
Chen Gangb1b0dea2015-04-14 15:47:35 -07005631
5632 /*
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005633 * Prevent mem_cgroup_migrate() from looking at
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005634 * page's memory cgroup of its source page while we change it.
Chen Gangb1b0dea2015-04-14 15:47:35 -07005635 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005636 ret = -EBUSY;
Chen Gangb1b0dea2015-04-14 15:47:35 -07005637 if (!trylock_page(page))
5638 goto out;
5639
5640 ret = -EINVAL;
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005641 if (page_memcg(page) != from)
Chen Gangb1b0dea2015-04-14 15:47:35 -07005642 goto out_unlock;
5643
Konstantin Khlebnikovae8af432019-10-18 20:20:11 -07005644 pgdat = page_pgdat(page);
Johannes Weiner867e5e12019-11-30 17:55:34 -08005645 from_vec = mem_cgroup_lruvec(from, pgdat);
5646 to_vec = mem_cgroup_lruvec(to, pgdat);
Konstantin Khlebnikovae8af432019-10-18 20:20:11 -07005647
Johannes Weinerabb242f2020-06-03 16:01:28 -07005648 lock_page_memcg(page);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005649
Johannes Weinerbe5d0a72020-06-03 16:01:57 -07005650 if (PageAnon(page)) {
5651 if (page_mapped(page)) {
5652 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5653 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
Johannes Weiner468c3982020-06-03 16:02:01 -07005654 if (PageTransHuge(page)) {
5655 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5656 -nr_pages);
5657 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5658 nr_pages);
5659 }
5660
Johannes Weinerbe5d0a72020-06-03 16:01:57 -07005661 }
5662 } else {
Johannes Weiner0d1c2072020-06-03 16:01:54 -07005663 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5664 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5665
5666 if (PageSwapBacked(page)) {
5667 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5668 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5669 }
5670
Johannes Weiner49e50d22020-06-03 16:01:47 -07005671 if (page_mapped(page)) {
5672 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5673 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5674 }
Chen Gangb1b0dea2015-04-14 15:47:35 -07005675
Johannes Weiner49e50d22020-06-03 16:01:47 -07005676 if (PageDirty(page)) {
5677 struct address_space *mapping = page_mapping(page);
Greg Thelenc4843a72015-05-22 17:13:16 -04005678
Christoph Hellwigf56753a2020-09-24 08:51:40 +02005679 if (mapping_can_writeback(mapping)) {
Johannes Weiner49e50d22020-06-03 16:01:47 -07005680 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5681 -nr_pages);
5682 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5683 nr_pages);
5684 }
Greg Thelenc4843a72015-05-22 17:13:16 -04005685 }
5686 }
5687
Chen Gangb1b0dea2015-04-14 15:47:35 -07005688 if (PageWriteback(page)) {
Konstantin Khlebnikovae8af432019-10-18 20:20:11 -07005689 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5690 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005691 }
5692
5693 /*
Johannes Weinerabb242f2020-06-03 16:01:28 -07005694 * All state has been migrated, let's switch to the new memcg.
5695 *
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005696 * It is safe to change page's memcg here because the page
Johannes Weinerabb242f2020-06-03 16:01:28 -07005697 * is referenced, charged, isolated, and locked: we can't race
5698 * with (un)charging, migration, LRU putback, or anything else
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005699 * that would rely on a stable page's memory cgroup.
Johannes Weinerabb242f2020-06-03 16:01:28 -07005700 *
5701 * Note that lock_page_memcg is a memcg lock, not a page lock,
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005702 * to save space. As soon as we switch page's memory cgroup to a
Johannes Weinerabb242f2020-06-03 16:01:28 -07005703 * new memcg that isn't locked, the above state can change
5704 * concurrently again. Make sure we're truly done with it.
Chen Gangb1b0dea2015-04-14 15:47:35 -07005705 */
Johannes Weinerabb242f2020-06-03 16:01:28 -07005706 smp_mb();
Chen Gangb1b0dea2015-04-14 15:47:35 -07005707
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07005708 css_get(&to->css);
5709 css_put(&from->css);
5710
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005711 page->memcg_data = (unsigned long)to;
Yang Shi87eaceb2019-09-23 15:38:15 -07005712
Johannes Weinerabb242f2020-06-03 16:01:28 -07005713 __unlock_page_memcg(from);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005714
5715 ret = 0;
5716
5717 local_irq_disable();
Johannes Weiner3fba69a2020-06-03 16:01:31 -07005718 mem_cgroup_charge_statistics(to, page, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005719 memcg_check_events(to, page);
Johannes Weiner3fba69a2020-06-03 16:01:31 -07005720 mem_cgroup_charge_statistics(from, page, -nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005721 memcg_check_events(from, page);
5722 local_irq_enable();
5723out_unlock:
5724 unlock_page(page);
5725out:
5726 return ret;
5727}
5728
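/*
 * A rough sketch of the caller pattern mem_cgroup_move_account() expects,
 * simplified from the move-charge walker later in this file (precharge
 * bookkeeping shown, device-private and THP cases left out):
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false,
 *					     mc.from, mc.to)) {
 *			mc.precharge--;
 *			mc.moved_charge++;
 *		}
 *		putback_lru_page(page);
 *	}
 *	put_page(page);
 */
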
Li RongQing7cf78062016-05-27 14:27:46 -07005729/**
5730 * get_mctgt_type - get target type of moving charge
5731 * @vma: the vma the pte to be checked belongs
5732 * @addr: the address corresponding to the pte to be checked
5733 * @ptent: the pte to be checked
5734 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5735 *
5736 * Returns
5737 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5738 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5739 * move charge. if @target is not NULL, the page is stored in target->page
5740 * with extra refcnt got(Callers should handle it).
5741 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5742 * target for charge migration. if @target is not NULL, the entry is stored
5743 * in target->ent.
Christoph Hellwig25b29952019-06-13 22:50:49 +02005744 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
5745 *   (so a ZONE_DEVICE page and thus not on the LRU).
Jérôme Glissedf6ad692017-09-08 16:12:24 -07005746 *   For now such a page is charged like a regular page would be, as for all
5747 *   intents and purposes it is just special memory taking the place of a
5748 *   regular page.
Jérôme Glissec733a822017-09-08 16:11:54 -07005749 *
5750 * See Documentation/vm/hmm.rst and include/linux/hmm.h
Li RongQing7cf78062016-05-27 14:27:46 -07005751 *
5752 * Called with pte lock held.
5753 */
5754
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005755static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005756 unsigned long addr, pte_t ptent, union mc_target *target)
5757{
Daisuke Nishimura02491442010-03-10 15:22:17 -08005758 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005759 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005760 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005761
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005762 if (pte_present(ptent))
5763 page = mc_handle_present_pte(vma, addr, ptent);
5764 else if (is_swap_pte(ptent))
Li RongQing48406ef2016-07-26 15:22:14 -07005765 page = mc_handle_swap_pte(vma, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08005766 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005767 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005768
5769 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005770 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005771 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005772 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005773		 * Only do a loose check without serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08005774		 * mem_cgroup_move_account() checks whether the page is
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005775		 * valid under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08005776 */
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005777 if (page_memcg(page) == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005778 ret = MC_TARGET_PAGE;
Christoph Hellwig25b29952019-06-13 22:50:49 +02005779 if (is_device_private_page(page))
Jérôme Glissec733a822017-09-08 16:11:54 -07005780 ret = MC_TARGET_DEVICE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005781 if (target)
5782 target->page = page;
5783 }
5784 if (!ret || !target)
5785 put_page(page);
5786 }
Huang Ying3e14a572017-09-06 16:22:37 -07005787 /*
5788 * There is a swap entry and a page doesn't exist or isn't charged.
5789 * But we cannot move a tail-page in a THP.
5790 */
5791 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
Li Zefan34c00c32013-09-23 16:56:01 +08005792 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07005793 ret = MC_TARGET_SWAP;
5794 if (target)
5795 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005796 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005797 return ret;
5798}
5799
Naoya Horiguchi12724852012-03-21 16:34:28 -07005800#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5801/*
Huang Yingd6810d72017-09-06 16:22:45 -07005802 * We don't consider PMD mapped swapping or file mapped pages because THP does
5803 * not support them for now.
Naoya Horiguchi12724852012-03-21 16:34:28 -07005804 * Caller should make sure that pmd_trans_huge(pmd) is true.
5805 */
5806static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5807 unsigned long addr, pmd_t pmd, union mc_target *target)
5808{
5809 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005810 enum mc_target_type ret = MC_TARGET_NONE;
5811
Zi Yan84c3fc42017-09-08 16:11:01 -07005812 if (unlikely(is_swap_pmd(pmd))) {
5813 VM_BUG_ON(thp_migration_supported() &&
5814 !is_pmd_migration_entry(pmd));
5815 return ret;
5816 }
Naoya Horiguchi12724852012-03-21 16:34:28 -07005817 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08005818 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005819 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07005820 return ret;
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08005821 if (page_memcg(page) == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005822 ret = MC_TARGET_PAGE;
5823 if (target) {
5824 get_page(page);
5825 target->page = page;
5826 }
5827 }
5828 return ret;
5829}
5830#else
5831static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5832 unsigned long addr, pmd_t pmd, union mc_target *target)
5833{
5834 return MC_TARGET_NONE;
5835}
5836#endif
5837
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005838static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5839 unsigned long addr, unsigned long end,
5840 struct mm_walk *walk)
5841{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005842 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005843 pte_t *pte;
5844 spinlock_t *ptl;
5845
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08005846 ptl = pmd_trans_huge_lock(pmd, vma);
5847 if (ptl) {
Jérôme Glissec733a822017-09-08 16:11:54 -07005848 /*
5849		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
Christoph Hellwig25b29952019-06-13 22:50:49 +02005850		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
5851 * this might change.
Jérôme Glissec733a822017-09-08 16:11:54 -07005852 */
Naoya Horiguchi12724852012-03-21 16:34:28 -07005853 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5854 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005855 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005856 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005857 }
Dave Hansen03319322011-03-22 16:32:56 -07005858
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005859 if (pmd_trans_unstable(pmd))
5860 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005861 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5862 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005863 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005864 mc.precharge++; /* increment precharge temporarily */
5865 pte_unmap_unlock(pte - 1, ptl);
5866 cond_resched();
5867
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005868 return 0;
5869}
5870
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02005871static const struct mm_walk_ops precharge_walk_ops = {
5872 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5873};
5874
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005875static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5876{
5877 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005878
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005879 mmap_read_lock(mm);
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02005880 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07005881 mmap_read_unlock(mm);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005882
5883 precharge = mc.precharge;
5884 mc.precharge = 0;
5885
5886 return precharge;
5887}
5888
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005889static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5890{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005891 unsigned long precharge = mem_cgroup_count_precharge(mm);
5892
5893 VM_BUG_ON(mc.moving_task);
5894 mc.moving_task = current;
5895 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005896}
5897
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005898/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5899static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005900{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005901 struct mem_cgroup *from = mc.from;
5902 struct mem_cgroup *to = mc.to;
5903
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005904 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005905 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005906 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005907 mc.precharge = 0;
5908 }
5909 /*
5910 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5911 * we must uncharge here.
5912 */
5913 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005914 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005915 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005916 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005917 /* we must fixup refcnts and charges */
5918 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005919 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04005920 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005921 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005922
Vladimir Davydov615d66c2016-08-11 15:33:03 -07005923 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5924
Johannes Weiner05b84302014-08-06 16:05:59 -07005925 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005926 * we charged both to->memory and to->memsw, so we
5927 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07005928 */
Johannes Weinerce00a962014-09-05 08:43:57 -04005929 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005930 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005931
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005932 mc.moved_swap = 0;
5933 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005934 memcg_oom_recover(from);
5935 memcg_oom_recover(to);
5936 wake_up_all(&mc.waitq);
5937}
5938
5939static void mem_cgroup_clear_mc(void)
5940{
Tejun Heo264a0ae2016-04-21 19:09:02 -04005941 struct mm_struct *mm = mc.mm;
5942
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005943 /*
5944 * we must clear moving_task before waking up waiters at the end of
5945 * task migration.
5946 */
5947 mc.moving_task = NULL;
5948 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005949 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005950 mc.from = NULL;
5951 mc.to = NULL;
Tejun Heo264a0ae2016-04-21 19:09:02 -04005952 mc.mm = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005953 spin_unlock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04005954
5955 mmput(mm);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005956}
5957
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005958static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005959{
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005960 struct cgroup_subsys_state *css;
Ross Zwislereed67d72015-12-23 14:53:27 -07005961 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
Tejun Heo9f2115f2015-09-08 15:01:10 -07005962 struct mem_cgroup *from;
Tejun Heo4530edd2015-09-11 15:00:19 -04005963 struct task_struct *leader, *p;
Tejun Heo9f2115f2015-09-08 15:01:10 -07005964 struct mm_struct *mm;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005965 unsigned long move_flags;
Tejun Heo9f2115f2015-09-08 15:01:10 -07005966 int ret = 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005967
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005968 /* charge immigration isn't supported on the default hierarchy */
5969 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Tejun Heo9f2115f2015-09-08 15:01:10 -07005970 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005971
Tejun Heo4530edd2015-09-11 15:00:19 -04005972 /*
5973 * Multi-process migrations only happen on the default hierarchy
5974 * where charge immigration is not used. Perform charge
5975 * immigration if @tset contains a leader and whine if there are
5976 * multiple.
5977 */
5978 p = NULL;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005979 cgroup_taskset_for_each_leader(leader, css, tset) {
Tejun Heo4530edd2015-09-11 15:00:19 -04005980 WARN_ON_ONCE(p);
5981 p = leader;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005982 memcg = mem_cgroup_from_css(css);
Tejun Heo4530edd2015-09-11 15:00:19 -04005983 }
5984 if (!p)
5985 return 0;
5986
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005987 /*
5988	 * We are now committed to this value whatever it is. Changes in this
5989 * tunable will only affect upcoming migrations, not the current one.
5990 * So we need to save it, and keep it going.
5991 */
5992 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5993 if (!move_flags)
5994 return 0;
5995
Tejun Heo9f2115f2015-09-08 15:01:10 -07005996 from = mem_cgroup_from_task(p);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005997
Tejun Heo9f2115f2015-09-08 15:01:10 -07005998 VM_BUG_ON(from == memcg);
Johannes Weiner247b1442014-12-10 15:44:11 -08005999
Tejun Heo9f2115f2015-09-08 15:01:10 -07006000 mm = get_task_mm(p);
6001 if (!mm)
6002 return 0;
6003	/* We move charges only when we move an owner of the mm */
6004 if (mm->owner == p) {
6005 VM_BUG_ON(mc.from);
6006 VM_BUG_ON(mc.to);
6007 VM_BUG_ON(mc.precharge);
6008 VM_BUG_ON(mc.moved_charge);
6009 VM_BUG_ON(mc.moved_swap);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006010
Tejun Heo9f2115f2015-09-08 15:01:10 -07006011 spin_lock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04006012 mc.mm = mm;
Tejun Heo9f2115f2015-09-08 15:01:10 -07006013 mc.from = from;
6014 mc.to = memcg;
6015 mc.flags = move_flags;
6016 spin_unlock(&mc.lock);
6017 /* We set mc.moving_task later */
6018
6019 ret = mem_cgroup_precharge_mc(mm);
6020 if (ret)
6021 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04006022 } else {
6023 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006024 }
6025 return ret;
6026}
6027
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05006028static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006029{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08006030 if (mc.to)
6031 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006032}
6033
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006034static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6035 unsigned long addr, unsigned long end,
6036 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006037{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006038 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08006039 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006040 pte_t *pte;
6041 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07006042 enum mc_target_type target_type;
6043 union mc_target target;
6044 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006045
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08006046 ptl = pmd_trans_huge_lock(pmd, vma);
6047 if (ptl) {
Hugh Dickins62ade862012-05-18 11:28:34 -07006048 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006049 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07006050 return 0;
6051 }
6052 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6053 if (target_type == MC_TARGET_PAGE) {
6054 page = target.page;
6055 if (!isolate_lru_page(page)) {
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006056 if (!mem_cgroup_move_account(page, true,
Johannes Weiner1306a852014-12-10 15:44:52 -08006057 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07006058 mc.precharge -= HPAGE_PMD_NR;
6059 mc.moved_charge += HPAGE_PMD_NR;
6060 }
6061 putback_lru_page(page);
6062 }
6063 put_page(page);
Jérôme Glissec733a822017-09-08 16:11:54 -07006064 } else if (target_type == MC_TARGET_DEVICE) {
6065 page = target.page;
6066 if (!mem_cgroup_move_account(page, true,
6067 mc.from, mc.to)) {
6068 mc.precharge -= HPAGE_PMD_NR;
6069 mc.moved_charge += HPAGE_PMD_NR;
6070 }
6071 put_page(page);
Naoya Horiguchi12724852012-03-21 16:34:28 -07006072 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006073 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07006074 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07006075 }
6076
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07006077 if (pmd_trans_unstable(pmd))
6078 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006079retry:
6080 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6081 for (; addr != end; addr += PAGE_SIZE) {
6082 pte_t ptent = *(pte++);
Jérôme Glissec733a822017-09-08 16:11:54 -07006083 bool device = false;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006084 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006085
6086 if (!mc.precharge)
6087 break;
6088
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006089 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Jérôme Glissec733a822017-09-08 16:11:54 -07006090 case MC_TARGET_DEVICE:
6091 device = true;
Joe Perchese4a9bc52020-04-06 20:08:39 -07006092 fallthrough;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006093 case MC_TARGET_PAGE:
6094 page = target.page;
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08006095 /*
6096 * We can have a part of the split pmd here. Moving it
6097 * can be done but it would be too convoluted so simply
6098			 * ignore such a partial THP and keep it in the original
6099 * memcg. There should be somebody mapping the head.
6100 */
6101 if (PageTransCompound(page))
6102 goto put;
Jérôme Glissec733a822017-09-08 16:11:54 -07006103 if (!device && isolate_lru_page(page))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006104 goto put;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006105 if (!mem_cgroup_move_account(page, false,
6106 mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006107 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006108 /* we uncharge from mc.from later. */
6109 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006110 }
Jérôme Glissec733a822017-09-08 16:11:54 -07006111 if (!device)
6112 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006113put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006114 put_page(page);
6115 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006116 case MC_TARGET_SWAP:
6117 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07006118 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08006119 mc.precharge--;
Hugh Dickins8d22a932020-07-23 21:15:24 -07006120 mem_cgroup_id_get_many(mc.to, 1);
6121 /* we fixup other refcnts and charges later. */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006122 mc.moved_swap++;
6123 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08006124 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006125 default:
6126 break;
6127 }
6128 }
6129 pte_unmap_unlock(pte - 1, ptl);
6130 cond_resched();
6131
6132 if (addr != end) {
6133 /*
6134 * We have consumed all precharges we got in can_attach().
6135		 * We try to charge one by one, but don't do any additional
6136		 * charges to mc.to if we have already failed to charge once in the
6137		 * attach() phase.
6138 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006139 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006140 if (!ret)
6141 goto retry;
6142 }
6143
6144 return ret;
6145}
6146
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02006147static const struct mm_walk_ops charge_walk_ops = {
6148 .pmd_entry = mem_cgroup_move_charge_pte_range,
6149};
6150
Tejun Heo264a0ae2016-04-21 19:09:02 -04006151static void mem_cgroup_move_charge(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006152{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006153 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08006154 /*
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07006155 * Signal lock_page_memcg() to take the memcg's move_lock
6156 * while we're moving its pages to another memcg. Then wait
6157 * for already started RCU-only updates to finish.
Johannes Weiner312722c2014-12-10 15:44:25 -08006158 */
6159 atomic_inc(&mc.from->moving_account);
6160 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006161retry:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07006162 if (unlikely(!mmap_read_trylock(mc.mm))) {
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006163 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07006164		 * Someone who is holding the mmap_lock might be waiting in
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006165 * waitq. So we cancel all extra charges, wake up all waiters,
6166 * and retry. Because we cancel precharges, we might not be able
6167 * to move enough charges, but moving charge is a best-effort
6168 * feature anyway, so it wouldn't be a big problem.
6169 */
6170 __mem_cgroup_clear_mc();
6171 cond_resched();
6172 goto retry;
6173 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08006174 /*
6175	 * When we have consumed all precharges and failed to do an
6176	 * additional charge, the page walk just aborts.
6177 */
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02006178 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6179 NULL);
James Morse0247f3f2016-10-07 17:00:12 -07006180
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07006181 mmap_read_unlock(mc.mm);
Johannes Weiner312722c2014-12-10 15:44:25 -08006182 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006183}
6184
Tejun Heo264a0ae2016-04-21 19:09:02 -04006185static void mem_cgroup_move_task(void)
Balbir Singh67e465a2008-02-07 00:13:54 -08006186{
Tejun Heo264a0ae2016-04-21 19:09:02 -04006187 if (mc.to) {
6188 mem_cgroup_move_charge();
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07006189 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04006190 }
Balbir Singh67e465a2008-02-07 00:13:54 -08006191}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006192#else /* !CONFIG_MMU */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05006193static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006194{
6195 return 0;
6196}
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05006197static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006198{
6199}
Tejun Heo264a0ae2016-04-21 19:09:02 -04006200static void mem_cgroup_move_task(void)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006201{
6202}
6203#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08006204
Chris Down677dc972019-03-05 15:45:55 -08006205static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6206{
6207 if (value == PAGE_COUNTER_MAX)
6208 seq_puts(m, "max\n");
6209 else
6210 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6211
6212 return 0;
6213}
6214
Johannes Weiner241994ed2015-02-11 15:26:06 -08006215static u64 memory_current_read(struct cgroup_subsys_state *css,
6216 struct cftype *cft)
6217{
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08006218 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6219
6220 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08006221}
6222
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006223static int memory_min_show(struct seq_file *m, void *v)
6224{
Chris Down677dc972019-03-05 15:45:55 -08006225 return seq_puts_memcg_tunable(m,
6226 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006227}
6228
6229static ssize_t memory_min_write(struct kernfs_open_file *of,
6230 char *buf, size_t nbytes, loff_t off)
6231{
6232 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6233 unsigned long min;
6234 int err;
6235
6236 buf = strstrip(buf);
6237 err = page_counter_memparse(buf, "max", &min);
6238 if (err)
6239 return err;
6240
6241 page_counter_set_min(&memcg->memory, min);
6242
6243 return nbytes;
6244}
6245
Johannes Weiner241994ed2015-02-11 15:26:06 -08006246static int memory_low_show(struct seq_file *m, void *v)
6247{
Chris Down677dc972019-03-05 15:45:55 -08006248 return seq_puts_memcg_tunable(m,
6249 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
Johannes Weiner241994ed2015-02-11 15:26:06 -08006250}
6251
6252static ssize_t memory_low_write(struct kernfs_open_file *of,
6253 char *buf, size_t nbytes, loff_t off)
6254{
6255 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6256 unsigned long low;
6257 int err;
6258
6259 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08006260 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006261 if (err)
6262 return err;
6263
Roman Gushchin23067152018-06-07 17:06:22 -07006264 page_counter_set_low(&memcg->memory, low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006265
6266 return nbytes;
6267}
6268
6269static int memory_high_show(struct seq_file *m, void *v)
6270{
Jakub Kicinskid1663a92020-06-01 21:49:49 -07006271 return seq_puts_memcg_tunable(m,
6272 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
Johannes Weiner241994ed2015-02-11 15:26:06 -08006273}
6274
6275static ssize_t memory_high_write(struct kernfs_open_file *of,
6276 char *buf, size_t nbytes, loff_t off)
6277{
6278 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Chris Downd977aa92020-08-06 23:21:58 -07006279 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
Johannes Weiner8c8c3832019-11-30 17:50:09 -08006280 bool drained = false;
Johannes Weiner241994ed2015-02-11 15:26:06 -08006281 unsigned long high;
6282 int err;
6283
6284 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08006285 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006286 if (err)
6287 return err;
6288
Johannes Weinere82553c2021-02-09 13:42:28 -08006289 page_counter_set_high(&memcg->memory, high);
6290
Johannes Weiner8c8c3832019-11-30 17:50:09 -08006291 for (;;) {
6292 unsigned long nr_pages = page_counter_read(&memcg->memory);
6293 unsigned long reclaimed;
Johannes Weiner588083b2016-03-17 14:20:25 -07006294
Johannes Weiner8c8c3832019-11-30 17:50:09 -08006295 if (nr_pages <= high)
6296 break;
6297
6298 if (signal_pending(current))
6299 break;
6300
6301 if (!drained) {
6302 drain_all_stock(memcg);
6303 drained = true;
6304 continue;
6305 }
6306
6307 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6308 GFP_KERNEL, true);
6309
6310 if (!reclaimed && !nr_retries--)
6311 break;
6312 }
6313
Johannes Weiner19ce33a2020-08-06 23:22:12 -07006314 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006315 return nbytes;
6316}
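/*
 * Illustrative use from userspace (cgroup path hypothetical, assuming a
 * cgroup2 mount at /sys/fs/cgroup):
 *
 *	echo 512M > /sys/fs/cgroup/foo/memory.high
 *
 * The write is handled by memory_high_write() above: the new high limit
 * takes effect immediately, and any overage is then reclaimed in the loop,
 * which gives up after MAX_RECLAIM_RETRIES unsuccessful passes or when a
 * signal is pending.
 */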
6317
6318static int memory_max_show(struct seq_file *m, void *v)
6319{
Chris Down677dc972019-03-05 15:45:55 -08006320 return seq_puts_memcg_tunable(m,
6321 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
Johannes Weiner241994ed2015-02-11 15:26:06 -08006322}
6323
6324static ssize_t memory_max_write(struct kernfs_open_file *of,
6325 char *buf, size_t nbytes, loff_t off)
6326{
6327 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Chris Downd977aa92020-08-06 23:21:58 -07006328 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07006329 bool drained = false;
Johannes Weiner241994ed2015-02-11 15:26:06 -08006330 unsigned long max;
6331 int err;
6332
6333 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08006334 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006335 if (err)
6336 return err;
6337
Roman Gushchinbbec2e12018-06-07 17:06:18 -07006338 xchg(&memcg->memory.max, max);
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07006339
6340 for (;;) {
6341 unsigned long nr_pages = page_counter_read(&memcg->memory);
6342
6343 if (nr_pages <= max)
6344 break;
6345
Johannes Weiner7249c9f2019-11-30 17:50:06 -08006346 if (signal_pending(current))
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07006347 break;
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07006348
6349 if (!drained) {
6350 drain_all_stock(memcg);
6351 drained = true;
6352 continue;
6353 }
6354
6355 if (nr_reclaims) {
6356 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6357 GFP_KERNEL, true))
6358 nr_reclaims--;
6359 continue;
6360 }
6361
Johannes Weinere27be242018-04-10 16:29:45 -07006362 memcg_memory_event(memcg, MEMCG_OOM);
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07006363 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6364 break;
6365 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08006366
Tejun Heo2529bb32015-05-22 18:23:34 -04006367 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006368 return nbytes;
6369}
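/*
 * Illustrative use (path hypothetical): "echo 100M > memory.max" sets the
 * hard limit via memory_max_write() above. The limit is updated first;
 * excess memory is then reclaimed, and if the cgroup still exceeds the
 * limit after MAX_RECLAIM_RETRIES attempts, MEMCG_OOM is recorded and
 * mem_cgroup_out_of_memory() is invoked.
 */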
6370
Shakeel Butt1e577f92019-07-11 20:55:55 -07006371static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6372{
6373 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6374 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6375 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6376 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6377 seq_printf(m, "oom_kill %lu\n",
6378 atomic_long_read(&events[MEMCG_OOM_KILL]));
6379}
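/*
 * Example of the output generated by __memory_events_show() above for
 * memory.events and memory.events.local (counts hypothetical):
 *
 *	low 0
 *	high 12
 *	max 3
 *	oom 1
 *	oom_kill 1
 */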
6380
Johannes Weiner241994ed2015-02-11 15:26:06 -08006381static int memory_events_show(struct seq_file *m, void *v)
6382{
Chris Downaa9694b2019-03-05 15:45:52 -08006383 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006384
Shakeel Butt1e577f92019-07-11 20:55:55 -07006385 __memory_events_show(m, memcg->memory_events);
6386 return 0;
6387}
Johannes Weiner241994ed2015-02-11 15:26:06 -08006388
Shakeel Butt1e577f92019-07-11 20:55:55 -07006389static int memory_events_local_show(struct seq_file *m, void *v)
6390{
6391 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6392
6393 __memory_events_show(m, memcg->memory_events_local);
Johannes Weiner241994ed2015-02-11 15:26:06 -08006394 return 0;
6395}
6396
Johannes Weiner587d9f72016-01-20 15:03:19 -08006397static int memory_stat_show(struct seq_file *m, void *v)
6398{
Chris Downaa9694b2019-03-05 15:45:52 -08006399 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Johannes Weinerc8713d02019-07-11 20:55:59 -07006400 char *buf;
Johannes Weiner587d9f72016-01-20 15:03:19 -08006401
Johannes Weinerc8713d02019-07-11 20:55:59 -07006402 buf = memory_stat_format(memcg);
6403 if (!buf)
6404 return -ENOMEM;
6405 seq_puts(m, buf);
6406 kfree(buf);
Johannes Weiner587d9f72016-01-20 15:03:19 -08006407 return 0;
6408}
6409
Muchun Song5f9a4f42020-10-13 16:52:59 -07006410#ifdef CONFIG_NUMA
6411static int memory_numa_stat_show(struct seq_file *m, void *v)
6412{
6413 int i;
6414 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6415
6416 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6417 int nid;
6418
6419 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6420 continue;
6421
6422 seq_printf(m, "%s", memory_stats[i].name);
6423 for_each_node_state(nid, N_MEMORY) {
6424 u64 size;
6425 struct lruvec *lruvec;
6426
6427 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6428 size = lruvec_page_state(lruvec, memory_stats[i].idx);
6429 size *= memory_stats[i].ratio;
6430 seq_printf(m, " N%d=%llu", nid, size);
6431 }
6432 seq_putc(m, '\n');
6433 }
6434
6435 return 0;
6436}
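/*
 * Example memory.numa_stat output produced by the loop above (stat names
 * and values illustrative): one line per per-node item in memory_stats[],
 * with a byte count for every node that has memory, e.g.
 *
 *	anon N0=1048576 N1=0
 *	file N0=4096 N1=8192
 */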
6437#endif
6438
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07006439static int memory_oom_group_show(struct seq_file *m, void *v)
6440{
Chris Downaa9694b2019-03-05 15:45:52 -08006441 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07006442
6443 seq_printf(m, "%d\n", memcg->oom_group);
6444
6445 return 0;
6446}
6447
6448static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6449 char *buf, size_t nbytes, loff_t off)
6450{
6451 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6452 int ret, oom_group;
6453
6454 buf = strstrip(buf);
6455 if (!buf)
6456 return -EINVAL;
6457
6458 ret = kstrtoint(buf, 0, &oom_group);
6459 if (ret)
6460 return ret;
6461
6462 if (oom_group != 0 && oom_group != 1)
6463 return -EINVAL;
6464
6465 memcg->oom_group = oom_group;
6466
6467 return nbytes;
6468}
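/*
 * Illustrative use (path hypothetical): "echo 1 > memory.oom.group" marks
 * the cgroup as an indivisible workload for the OOM killer, so an OOM kill
 * triggered against it kills all tasks in the cgroup rather than a single
 * victim; only the values 0 and 1 are accepted by memory_oom_group_write()
 * above.
 */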
6469
Johannes Weiner241994ed2015-02-11 15:26:06 -08006470static struct cftype memory_files[] = {
6471 {
6472 .name = "current",
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08006473 .flags = CFTYPE_NOT_ON_ROOT,
Johannes Weiner241994ed2015-02-11 15:26:06 -08006474 .read_u64 = memory_current_read,
6475 },
6476 {
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006477 .name = "min",
6478 .flags = CFTYPE_NOT_ON_ROOT,
6479 .seq_show = memory_min_show,
6480 .write = memory_min_write,
6481 },
6482 {
Johannes Weiner241994ed2015-02-11 15:26:06 -08006483 .name = "low",
6484 .flags = CFTYPE_NOT_ON_ROOT,
6485 .seq_show = memory_low_show,
6486 .write = memory_low_write,
6487 },
6488 {
6489 .name = "high",
6490 .flags = CFTYPE_NOT_ON_ROOT,
6491 .seq_show = memory_high_show,
6492 .write = memory_high_write,
6493 },
6494 {
6495 .name = "max",
6496 .flags = CFTYPE_NOT_ON_ROOT,
6497 .seq_show = memory_max_show,
6498 .write = memory_max_write,
6499 },
6500 {
6501 .name = "events",
6502 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo472912a2015-09-18 18:01:59 -04006503 .file_offset = offsetof(struct mem_cgroup, events_file),
Johannes Weiner241994ed2015-02-11 15:26:06 -08006504 .seq_show = memory_events_show,
6505 },
Johannes Weiner587d9f72016-01-20 15:03:19 -08006506 {
Shakeel Butt1e577f92019-07-11 20:55:55 -07006507 .name = "events.local",
6508 .flags = CFTYPE_NOT_ON_ROOT,
6509 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6510 .seq_show = memory_events_local_show,
6511 },
6512 {
Johannes Weiner587d9f72016-01-20 15:03:19 -08006513 .name = "stat",
Johannes Weiner587d9f72016-01-20 15:03:19 -08006514 .seq_show = memory_stat_show,
6515 },
Muchun Song5f9a4f42020-10-13 16:52:59 -07006516#ifdef CONFIG_NUMA
6517 {
6518 .name = "numa_stat",
6519 .seq_show = memory_numa_stat_show,
6520 },
6521#endif
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07006522 {
6523 .name = "oom.group",
6524 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6525 .seq_show = memory_oom_group_show,
6526 .write = memory_oom_group_write,
6527 },
Johannes Weiner241994ed2015-02-11 15:26:06 -08006528 { } /* terminate */
6529};
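/*
 * These cftypes are registered as the default-hierarchy (cgroup v2)
 * interface via .dfl_cftypes below, so each cgroup directory exposes them
 * as memory.current, memory.min, memory.low, memory.high, memory.max,
 * memory.events, memory.events.local, memory.stat, memory.numa_stat and
 * memory.oom.group.
 */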
6530
Tejun Heo073219e2014-02-08 10:36:58 -05006531struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08006532 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08006533 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08006534 .css_offline = mem_cgroup_css_offline,
Vladimir Davydov6df38682015-12-29 14:54:10 -08006535 .css_released = mem_cgroup_css_released,
Tejun Heo92fb9742012-11-19 08:13:38 -08006536 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04006537 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006538 .can_attach = mem_cgroup_can_attach,
6539 .cancel_attach = mem_cgroup_cancel_attach,
Tejun Heo264a0ae2016-04-21 19:09:02 -04006540 .post_attach = mem_cgroup_move_task,
Johannes Weiner241994ed2015-02-11 15:26:06 -08006541 .dfl_cftypes = memory_files,
6542 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08006543 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08006544};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08006545
Johannes Weinerbc50bcc2020-04-01 21:07:03 -07006546/*
6547 * This function calculates an individual cgroup's effective
6548 * protection which is derived from its own memory.min/low, its
6549 * parent's and siblings' settings, as well as the actual memory
6550 * distribution in the tree.
6551 *
6552 * The following rules apply to the effective protection values:
6553 *
6554 * 1. At the first level of reclaim, effective protection is equal to
6555 * the declared protection in memory.min and memory.low.
6556 *
6557 * 2. To enable safe delegation of the protection configuration, at
6558 * subsequent levels the effective protection is capped to the
6559 * parent's effective protection.
6560 *
6561 * 3. To make complex and dynamic subtrees easier to configure, the
6562 * user is allowed to overcommit the declared protection at a given
6563 * level. If that is the case, the parent's effective protection is
6564 * distributed to the children in proportion to how much protection
6565 * they have declared and how much of it they are utilizing.
6566 *
6567 * This makes distribution proportional, but also work-conserving:
6568 * if one cgroup claims much more protection than the memory it uses,
6569 * the unused remainder is available to its siblings.
6570 *
6571 * 4. Conversely, when the declared protection is undercommitted at a
6572 * given level, the distribution of the larger parental protection
6573 * budget is NOT proportional. A cgroup's protection from a sibling
6574 * is capped to its own memory.min/low setting.
6575 *
Johannes Weiner8a931f82020-04-01 21:07:07 -07006576 * 5. However, to allow protecting recursive subtrees from each other
6577 * without having to declare each individual cgroup's fixed share
6578 * of the ancestor's claim to protection, any unutilized -
6579 * "floating" - protection from up the tree is distributed in
6580 * proportion to each cgroup's *usage*. This makes the protection
6581 * neutral wrt sibling cgroups and lets them compete freely over
6582 * the shared parental protection budget, but it protects the
6583 * subtree as a whole from neighboring subtrees.
6584 *
6585 * Note that 4. and 5. are not in conflict: 4. is about protecting
6586 * against immediate siblings whereas 5. is about protecting against
6587 * neighboring subtrees.
Johannes Weinerbc50bcc2020-04-01 21:07:03 -07006588 */
6589static unsigned long effective_protection(unsigned long usage,
Johannes Weiner8a931f82020-04-01 21:07:07 -07006590 unsigned long parent_usage,
Johannes Weinerbc50bcc2020-04-01 21:07:03 -07006591 unsigned long setting,
6592 unsigned long parent_effective,
6593 unsigned long siblings_protected)
6594{
6595 unsigned long protected;
Johannes Weiner8a931f82020-04-01 21:07:07 -07006596 unsigned long ep;
Johannes Weinerbc50bcc2020-04-01 21:07:03 -07006597
6598 protected = min(usage, setting);
6599 /*
6600 * If all cgroups at this level combined claim and use more
6601	 * protection than what the parent affords them, distribute
6602 * shares in proportion to utilization.
6603 *
6604 * We are using actual utilization rather than the statically
6605 * claimed protection in order to be work-conserving: claimed
6606 * but unused protection is available to siblings that would
6607 * otherwise get a smaller chunk than what they claimed.
6608 */
6609 if (siblings_protected > parent_effective)
6610 return protected * parent_effective / siblings_protected;
6611
6612 /*
6613 * Ok, utilized protection of all children is within what the
6614 * parent affords them, so we know whatever this child claims
6615 * and utilizes is effectively protected.
6616 *
6617 * If there is unprotected usage beyond this value, reclaim
6618 * will apply pressure in proportion to that amount.
6619 *
6620 * If there is unutilized protection, the cgroup will be fully
6621 * shielded from reclaim, but we do return a smaller value for
6622 * protection than what the group could enjoy in theory. This
6623 * is okay. With the overcommit distribution above, effective
6624 * protection is always dependent on how memory is actually
6625 * consumed among the siblings anyway.
6626 */
Johannes Weiner8a931f82020-04-01 21:07:07 -07006627 ep = protected;
6628
6629 /*
6630 * If the children aren't claiming (all of) the protection
6631 * afforded to them by the parent, distribute the remainder in
6632 * proportion to the (unprotected) memory of each cgroup. That
6633 * way, cgroups that aren't explicitly prioritized wrt each
6634 * other compete freely over the allowance, but they are
6635 * collectively protected from neighboring trees.
6636 *
6637 * We're using unprotected memory for the weight so that if
6638 * some cgroups DO claim explicit protection, we don't protect
6639 * the same bytes twice.
Johannes Weinercd324ed2020-06-25 20:30:16 -07006640 *
6641 * Check both usage and parent_usage against the respective
6642 * protected values. One should imply the other, but they
6643 * aren't read atomically - make sure the division is sane.
Johannes Weiner8a931f82020-04-01 21:07:07 -07006644 */
6645 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6646 return ep;
Johannes Weinercd324ed2020-06-25 20:30:16 -07006647 if (parent_effective > siblings_protected &&
6648 parent_usage > siblings_protected &&
6649 usage > protected) {
Johannes Weiner8a931f82020-04-01 21:07:07 -07006650 unsigned long unclaimed;
6651
6652 unclaimed = parent_effective - siblings_protected;
6653 unclaimed *= usage - protected;
6654 unclaimed /= parent_usage - siblings_protected;
6655
6656 ep += unclaimed;
6657 }
6658
6659 return ep;
Johannes Weinerbc50bcc2020-04-01 21:07:03 -07006660}
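/*
 * Worked example for the overcommit case above (numbers hypothetical): a
 * parent with an effective protection of 10G has two children that each
 * declare memory.low=8G and each use 8G of memory. siblings_protected (16G)
 * exceeds parent_effective (10G), so each child ends up with an effective
 * protection of 8G * 10G / 16G = 5G, i.e. proportional to its utilization
 * and, in sum, never more than the parent affords.
 */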
6661
Johannes Weiner241994ed2015-02-11 15:26:06 -08006662/**
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006663 * mem_cgroup_calculate_protection - calculate effective memory protection
Sean Christopherson34c81052017-07-10 15:48:05 -07006664 * @root: the top ancestor of the sub-tree being checked
Johannes Weiner241994ed2015-02-11 15:26:06 -08006665 * @memcg: the memory cgroup to check
6666 *
Roman Gushchin23067152018-06-07 17:06:22 -07006667 * WARNING: This function is not stateless! It can only be used as part
6668 * of a top-down tree iteration, not for isolated queries.
Johannes Weiner241994ed2015-02-11 15:26:06 -08006669 */
Chris Down45c7f7e2020-08-06 23:22:05 -07006670void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6671 struct mem_cgroup *memcg)
Johannes Weiner241994ed2015-02-11 15:26:06 -08006672{
Johannes Weiner8a931f82020-04-01 21:07:07 -07006673 unsigned long usage, parent_usage;
Roman Gushchin23067152018-06-07 17:06:22 -07006674 struct mem_cgroup *parent;
6675
Johannes Weiner241994ed2015-02-11 15:26:06 -08006676 if (mem_cgroup_disabled())
Chris Down45c7f7e2020-08-06 23:22:05 -07006677 return;
Johannes Weiner241994ed2015-02-11 15:26:06 -08006678
Sean Christopherson34c81052017-07-10 15:48:05 -07006679 if (!root)
6680 root = root_mem_cgroup;
Yafang Shao22f74962020-08-06 23:22:01 -07006681
6682 /*
6683 * Effective values of the reclaim targets are ignored so they
6684 * can be stale. Have a look at mem_cgroup_protection for more
6685 * details.
6686 * TODO: calculation should be more robust so that we do not need
6687 * that special casing.
6688 */
Sean Christopherson34c81052017-07-10 15:48:05 -07006689 if (memcg == root)
Chris Down45c7f7e2020-08-06 23:22:05 -07006690 return;
Johannes Weiner241994ed2015-02-11 15:26:06 -08006691
Roman Gushchin23067152018-06-07 17:06:22 -07006692 usage = page_counter_read(&memcg->memory);
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006693 if (!usage)
Chris Down45c7f7e2020-08-06 23:22:05 -07006694 return;
Sean Christopherson34c81052017-07-10 15:48:05 -07006695
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006696 parent = parent_mem_cgroup(memcg);
Roman Gushchindf2a4192018-06-14 15:26:17 -07006697 /* No parent means a non-hierarchical mode on v1 memcg */
6698 if (!parent)
Chris Down45c7f7e2020-08-06 23:22:05 -07006699 return;
Roman Gushchindf2a4192018-06-14 15:26:17 -07006700
Johannes Weinerbc50bcc2020-04-01 21:07:03 -07006701 if (parent == root) {
Chris Downc3d53202020-04-01 21:07:27 -07006702 memcg->memory.emin = READ_ONCE(memcg->memory.min);
Chris Down03960e32020-06-25 20:30:22 -07006703 memcg->memory.elow = READ_ONCE(memcg->memory.low);
Chris Down45c7f7e2020-08-06 23:22:05 -07006704 return;
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006705 }
6706
Johannes Weiner8a931f82020-04-01 21:07:07 -07006707 parent_usage = page_counter_read(&parent->memory);
6708
Chris Downb3a78222020-04-01 21:07:33 -07006709 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
Chris Downc3d53202020-04-01 21:07:27 -07006710 READ_ONCE(memcg->memory.min),
6711 READ_ONCE(parent->memory.emin),
Chris Downb3a78222020-04-01 21:07:33 -07006712 atomic_long_read(&parent->memory.children_min_usage)));
Roman Gushchin23067152018-06-07 17:06:22 -07006713
Chris Downb3a78222020-04-01 21:07:33 -07006714 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
Chris Down03960e32020-06-25 20:30:22 -07006715 READ_ONCE(memcg->memory.low),
6716 READ_ONCE(parent->memory.elow),
Chris Downb3a78222020-04-01 21:07:33 -07006717 atomic_long_read(&parent->memory.children_low_usage)));
Johannes Weiner241994ed2015-02-11 15:26:06 -08006718}
6719
Johannes Weiner00501b52014-08-08 14:19:20 -07006720/**
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07006721 * mem_cgroup_charge - charge a newly allocated page to a cgroup
Johannes Weiner00501b52014-08-08 14:19:20 -07006722 * @page: page to charge
6723 * @mm: mm context of the victim
6724 * @gfp_mask: reclaim mode
Johannes Weiner00501b52014-08-08 14:19:20 -07006725 *
6726 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6727 * pages according to @gfp_mask if necessary.
6728 *
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07006729 * Returns 0 on success. Otherwise, an error code is returned.
Johannes Weiner00501b52014-08-08 14:19:20 -07006730 */
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07006731int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
Johannes Weiner00501b52014-08-08 14:19:20 -07006732{
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07006733 unsigned int nr_pages = thp_nr_pages(page);
Johannes Weiner00501b52014-08-08 14:19:20 -07006734 struct mem_cgroup *memcg = NULL;
Johannes Weiner00501b52014-08-08 14:19:20 -07006735 int ret = 0;
6736
6737 if (mem_cgroup_disabled())
6738 goto out;
6739
6740 if (PageSwapCache(page)) {
Johannes Weiner2d1c4982020-06-03 16:02:14 -07006741 swp_entry_t ent = { .val = page_private(page), };
6742 unsigned short id;
6743
Johannes Weiner00501b52014-08-08 14:19:20 -07006744 /*
6745 * Every swap fault against a single page tries to charge the
6746 * page, bail as early as possible. shmem_unuse() encounters
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006747		 * already charged pages, too. The page and memcg binding is
6748 * protected by the page lock, which serializes swap cache
6749 * removal, which in turn serializes uncharging.
Johannes Weiner00501b52014-08-08 14:19:20 -07006750 */
Vladimir Davydove993d902015-09-09 15:35:35 -07006751 VM_BUG_ON_PAGE(!PageLocked(page), page);
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006752 if (page_memcg(compound_head(page)))
Johannes Weiner00501b52014-08-08 14:19:20 -07006753 goto out;
Vladimir Davydove993d902015-09-09 15:35:35 -07006754
Johannes Weiner2d1c4982020-06-03 16:02:14 -07006755 id = lookup_swap_cgroup_id(ent);
6756 rcu_read_lock();
6757 memcg = mem_cgroup_from_id(id);
6758 if (memcg && !css_tryget_online(&memcg->css))
6759 memcg = NULL;
6760 rcu_read_unlock();
Johannes Weiner00501b52014-08-08 14:19:20 -07006761 }
6762
Johannes Weiner00501b52014-08-08 14:19:20 -07006763 if (!memcg)
6764 memcg = get_mem_cgroup_from_mm(mm);
6765
6766 ret = try_charge(memcg, gfp_mask, nr_pages);
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07006767 if (ret)
6768 goto out_put;
Johannes Weiner00501b52014-08-08 14:19:20 -07006769
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07006770 css_get(&memcg->css);
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07006771 commit_charge(page, memcg);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006772
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006773 local_irq_disable();
Johannes Weiner3fba69a2020-06-03 16:01:31 -07006774 mem_cgroup_charge_statistics(memcg, page, nr_pages);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006775 memcg_check_events(memcg, page);
6776 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07006777
Johannes Weiner2d1c4982020-06-03 16:02:14 -07006778 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07006779 swp_entry_t entry = { .val = page_private(page) };
6780 /*
6781 * The swap entry might not get freed for a long time,
6782 * let's not wait for it. The page already received a
6783 * memory+swap charge, drop the swap entry duplicate.
6784 */
Huang Ying38d8b4e2017-07-06 15:37:18 -07006785 mem_cgroup_uncharge_swap(entry, nr_pages);
Johannes Weiner00501b52014-08-08 14:19:20 -07006786 }
Johannes Weiner00501b52014-08-08 14:19:20 -07006787
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07006788out_put:
6789 css_put(&memcg->css);
6790out:
6791 return ret;
Johannes Weiner3fea5a42020-06-03 16:01:41 -07006792}
6793
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006794struct uncharge_gather {
6795 struct mem_cgroup *memcg;
Johannes Weiner9f762db2020-06-03 16:01:44 -07006796 unsigned long nr_pages;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006797 unsigned long pgpgout;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006798 unsigned long nr_kmem;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006799 struct page *dummy_page;
6800};
6801
6802static inline void uncharge_gather_clear(struct uncharge_gather *ug)
Johannes Weiner747db952014-08-08 14:19:24 -07006803{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006804 memset(ug, 0, sizeof(*ug));
6805}
6806
6807static void uncharge_batch(const struct uncharge_gather *ug)
6808{
Johannes Weiner747db952014-08-08 14:19:24 -07006809 unsigned long flags;
6810
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006811 if (!mem_cgroup_is_root(ug->memcg)) {
Johannes Weiner9f762db2020-06-03 16:01:44 -07006812 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08006813 if (do_memsw_account())
Johannes Weiner9f762db2020-06-03 16:01:44 -07006814 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006815 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6816 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6817 memcg_oom_recover(ug->memcg);
Johannes Weinerce00a962014-09-05 08:43:57 -04006818 }
Johannes Weiner747db952014-08-08 14:19:24 -07006819
6820 local_irq_save(flags);
Johannes Weinerc9019e92018-01-31 16:16:37 -08006821 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
Johannes Weiner9f762db2020-06-03 16:01:44 -07006822 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006823 memcg_check_events(ug->memcg, ug->dummy_page);
Johannes Weiner747db952014-08-08 14:19:24 -07006824 local_irq_restore(flags);
Michal Hockof1796542020-09-04 16:35:24 -07006825
6826 /* drop reference from uncharge_page */
6827 css_put(&ug->memcg->css);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006828}
6829
6830static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6831{
Johannes Weiner9f762db2020-06-03 16:01:44 -07006832 unsigned long nr_pages;
6833
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006834 VM_BUG_ON_PAGE(PageLRU(page), page);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006835
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006836 if (!page_memcg(page))
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006837 return;
6838
6839 /*
6840 * Nobody should be changing or seriously looking at
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006841 * page_memcg(page) at this point, we have fully
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006842 * exclusive access to the page.
6843 */
6844
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006845 if (ug->memcg != page_memcg(page)) {
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006846 if (ug->memcg) {
6847 uncharge_batch(ug);
6848 uncharge_gather_clear(ug);
6849 }
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006850 ug->memcg = page_memcg(page);
Michal Hockof1796542020-09-04 16:35:24 -07006851
6852 /* pairs with css_put in uncharge_batch */
6853 css_get(&ug->memcg->css);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006854 }
6855
Johannes Weiner9f762db2020-06-03 16:01:44 -07006856 nr_pages = compound_nr(page);
6857 ug->nr_pages += nr_pages;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006858
Roman Gushchin18b2db32020-12-01 13:58:30 -08006859 if (PageMemcgKmem(page))
Johannes Weiner9f762db2020-06-03 16:01:44 -07006860 ug->nr_kmem += nr_pages;
Roman Gushchin18b2db32020-12-01 13:58:30 -08006861 else
6862 ug->pgpgout++;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006863
6864 ug->dummy_page = page;
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006865 page->memcg_data = 0;
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07006866 css_put(&ug->memcg->css);
Johannes Weiner747db952014-08-08 14:19:24 -07006867}
6868
6869static void uncharge_list(struct list_head *page_list)
6870{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006871 struct uncharge_gather ug;
Johannes Weiner747db952014-08-08 14:19:24 -07006872 struct list_head *next;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006873
6874 uncharge_gather_clear(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006875
Johannes Weiner8b592652016-03-17 14:20:31 -07006876 /*
6877 * Note that the list can be a single page->lru; hence the
6878 * do-while loop instead of a simple list_for_each_entry().
6879 */
Johannes Weiner747db952014-08-08 14:19:24 -07006880 next = page_list->next;
6881 do {
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006882 struct page *page;
6883
Johannes Weiner747db952014-08-08 14:19:24 -07006884 page = list_entry(next, struct page, lru);
6885 next = page->lru.next;
6886
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006887 uncharge_page(page, &ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006888 } while (next != page_list);
6889
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006890 if (ug.memcg)
6891 uncharge_batch(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006892}
6893
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006894/**
6895 * mem_cgroup_uncharge - uncharge a page
6896 * @page: page to uncharge
6897 *
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07006898 * Uncharge a page previously charged with mem_cgroup_charge().
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006899 */
6900void mem_cgroup_uncharge(struct page *page)
6901{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006902 struct uncharge_gather ug;
6903
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006904 if (mem_cgroup_disabled())
6905 return;
6906
Johannes Weiner747db952014-08-08 14:19:24 -07006907 /* Don't touch page->lru of any random page, pre-check: */
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006908 if (!page_memcg(page))
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006909 return;
6910
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006911 uncharge_gather_clear(&ug);
6912 uncharge_page(page, &ug);
6913 uncharge_batch(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006914}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006915
Johannes Weiner747db952014-08-08 14:19:24 -07006916/**
6917 * mem_cgroup_uncharge_list - uncharge a list of pages
6918 * @page_list: list of pages to uncharge
6919 *
6920 * Uncharge a list of pages previously charged with
Johannes Weinerf0e45fb2020-06-03 16:02:07 -07006921 * mem_cgroup_charge().
Johannes Weiner747db952014-08-08 14:19:24 -07006922 */
6923void mem_cgroup_uncharge_list(struct list_head *page_list)
6924{
6925 if (mem_cgroup_disabled())
6926 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006927
Johannes Weiner747db952014-08-08 14:19:24 -07006928 if (!list_empty(page_list))
6929 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006930}
6931
6932/**
Johannes Weiner6a93ca82016-03-15 14:57:19 -07006933 * mem_cgroup_migrate - charge a page's replacement
6934 * @oldpage: currently circulating page
6935 * @newpage: replacement page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006936 *
Johannes Weiner6a93ca82016-03-15 14:57:19 -07006937 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6938 * be uncharged upon free.
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006939 *
6940 * Both pages must be locked, @newpage->mapping must be set up.
6941 */
Johannes Weiner6a93ca82016-03-15 14:57:19 -07006942void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006943{
Johannes Weiner29833312014-12-10 15:44:02 -08006944 struct mem_cgroup *memcg;
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006945 unsigned int nr_pages;
Tejun Heod93c4132016-06-24 14:49:54 -07006946 unsigned long flags;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006947
6948 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6949 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006950 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006951 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6952 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006953
6954 if (mem_cgroup_disabled())
6955 return;
6956
6957 /* Page cache replacement: new page already charged? */
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006958 if (page_memcg(newpage))
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006959 return;
6960
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08006961 memcg = page_memcg(oldpage);
Alex Shia4055882020-12-18 14:01:31 -08006962 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
Johannes Weiner29833312014-12-10 15:44:02 -08006963 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006964 return;
6965
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006966 /* Force-charge the new page. The old one will be freed soon */
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07006967 nr_pages = thp_nr_pages(newpage);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006968
6969 page_counter_charge(&memcg->memory, nr_pages);
6970 if (do_memsw_account())
6971 page_counter_charge(&memcg->memsw, nr_pages);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006972
Johannes Weiner1a3e1f42020-08-06 23:20:45 -07006973 css_get(&memcg->css);
Johannes Weinerd9eb1ea2020-06-03 16:02:24 -07006974 commit_charge(newpage, memcg);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006975
Tejun Heod93c4132016-06-24 14:49:54 -07006976 local_irq_save(flags);
Johannes Weiner3fba69a2020-06-03 16:01:31 -07006977 mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006978 memcg_check_events(memcg, newpage);
Tejun Heod93c4132016-06-24 14:49:54 -07006979 local_irq_restore(flags);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006980}
6981
Johannes Weineref129472016-01-14 15:21:34 -08006982DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
Johannes Weiner11092082016-01-14 15:21:26 -08006983EXPORT_SYMBOL(memcg_sockets_enabled_key);
6984
Johannes Weiner2d758072016-10-07 17:00:58 -07006985void mem_cgroup_sk_alloc(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08006986{
6987 struct mem_cgroup *memcg;
6988
Johannes Weiner2d758072016-10-07 17:00:58 -07006989 if (!mem_cgroup_sockets_enabled)
6990 return;
6991
Shakeel Butte876ecc2020-03-09 22:16:05 -07006992	/* Do not associate the sock with an unrelated interrupted task's memcg. */
6993 if (in_interrupt())
6994 return;
6995
Johannes Weiner11092082016-01-14 15:21:26 -08006996 rcu_read_lock();
6997 memcg = mem_cgroup_from_task(current);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006998 if (memcg == root_mem_cgroup)
6999 goto out;
Johannes Weiner0db15292016-01-20 15:02:50 -08007000 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007001 goto out;
Shakeel Butt8965aa22020-04-01 21:07:10 -07007002 if (css_tryget(&memcg->css))
Johannes Weiner11092082016-01-14 15:21:26 -08007003 sk->sk_memcg = memcg;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007004out:
Johannes Weiner11092082016-01-14 15:21:26 -08007005 rcu_read_unlock();
7006}
Johannes Weiner11092082016-01-14 15:21:26 -08007007
Johannes Weiner2d758072016-10-07 17:00:58 -07007008void mem_cgroup_sk_free(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08007009{
Johannes Weiner2d758072016-10-07 17:00:58 -07007010 if (sk->sk_memcg)
7011 css_put(&sk->sk_memcg->css);
Johannes Weiner11092082016-01-14 15:21:26 -08007012}
7013
7014/**
7015 * mem_cgroup_charge_skmem - charge socket memory
7016 * @memcg: memcg to charge
7017 * @nr_pages: number of pages to charge
7018 *
7019 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7020 * @memcg's configured limit, %false if the charge had to be forced.
7021 */
7022bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7023{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007024 gfp_t gfp_mask = GFP_KERNEL;
Johannes Weiner11092082016-01-14 15:21:26 -08007025
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007026 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08007027 struct page_counter *fail;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007028
Johannes Weiner0db15292016-01-20 15:02:50 -08007029 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7030 memcg->tcpmem_pressure = 0;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007031 return true;
7032 }
Johannes Weiner0db15292016-01-20 15:02:50 -08007033 page_counter_charge(&memcg->tcpmem, nr_pages);
7034 memcg->tcpmem_pressure = 1;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007035 return false;
Johannes Weiner11092082016-01-14 15:21:26 -08007036 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08007037
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007038 /* Don't block in the packet receive path */
7039 if (in_softirq())
7040 gfp_mask = GFP_NOWAIT;
7041
Johannes Weinerc9019e92018-01-31 16:16:37 -08007042 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
Johannes Weinerb2807f02016-01-20 15:03:22 -08007043
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007044 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7045 return true;
7046
7047 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08007048 return false;
7049}
7050
7051/**
7052 * mem_cgroup_uncharge_skmem - uncharge socket memory
Mike Rapoportb7701a52018-02-06 15:42:13 -08007053 * @memcg: memcg to uncharge
7054 * @nr_pages: number of pages to uncharge
Johannes Weiner11092082016-01-14 15:21:26 -08007055 */
7056void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7057{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007058 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08007059 page_counter_uncharge(&memcg->tcpmem, nr_pages);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007060 return;
7061 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08007062
Johannes Weinerc9019e92018-01-31 16:16:37 -08007063 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
Johannes Weinerb2807f02016-01-20 15:03:22 -08007064
Roman Gushchin475d0482017-09-08 16:13:09 -07007065 refill_stock(memcg, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08007066}
7067
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007068static int __init cgroup_memory(char *s)
7069{
7070 char *token;
7071
7072 while ((token = strsep(&s, ",")) != NULL) {
7073 if (!*token)
7074 continue;
7075 if (!strcmp(token, "nosocket"))
7076 cgroup_memory_nosocket = true;
Vladimir Davydov04823c82016-01-20 15:02:38 -08007077 if (!strcmp(token, "nokmem"))
7078 cgroup_memory_nokmem = true;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08007079 }
7080 return 0;
7081}
7082__setup("cgroup.memory=", cgroup_memory);
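/*
 * Example kernel command line use (illustrative):
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket memory accounting and kernel memory accounting at boot;
 * tokens the parser above does not recognize are silently ignored.
 */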
Johannes Weiner11092082016-01-14 15:21:26 -08007083
Michal Hocko2d110852013-02-22 16:34:43 -08007084/*
Michal Hocko10813122013-02-22 16:35:41 -08007085 * subsys_initcall() for memory controller.
7086 *
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01007087 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7088 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7089 * basically everything that doesn't depend on a specific mem_cgroup structure
7090 * should be initialized from here.
Michal Hocko2d110852013-02-22 16:34:43 -08007091 */
7092static int __init mem_cgroup_init(void)
7093{
Johannes Weiner95a045f2015-02-11 15:26:33 -08007094 int cpu, node;
7095
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01007096 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7097 memcg_hotplug_cpu_dead);
Johannes Weiner95a045f2015-02-11 15:26:33 -08007098
7099 for_each_possible_cpu(cpu)
7100 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7101 drain_local_stock);
7102
7103 for_each_node(node) {
7104 struct mem_cgroup_tree_per_node *rtpn;
Johannes Weiner95a045f2015-02-11 15:26:33 -08007105
7106 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7107 node_online(node) ? node : NUMA_NO_NODE);
7108
Mel Gormanef8f2322016-07-28 15:46:05 -07007109 rtpn->rb_root = RB_ROOT;
Davidlohr Buesofa90b2f2017-09-08 16:15:21 -07007110 rtpn->rb_rightmost = NULL;
Mel Gormanef8f2322016-07-28 15:46:05 -07007111 spin_lock_init(&rtpn->lock);
Johannes Weiner95a045f2015-02-11 15:26:33 -08007112 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7113 }
7114
Michal Hocko2d110852013-02-22 16:34:43 -08007115 return 0;
7116}
7117subsys_initcall(mem_cgroup_init);
Johannes Weiner21afa382015-02-11 15:26:36 -08007118
7119#ifdef CONFIG_MEMCG_SWAP
Arnd Bergmann358c07f2016-08-25 15:17:08 -07007120static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7121{
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07007122 while (!refcount_inc_not_zero(&memcg->id.ref)) {
Arnd Bergmann358c07f2016-08-25 15:17:08 -07007123 /*
7124		 * The root cgroup cannot be destroyed, so its refcount must
7125 * always be >= 1.
7126 */
7127 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7128 VM_BUG_ON(1);
7129 break;
7130 }
7131 memcg = parent_mem_cgroup(memcg);
7132 if (!memcg)
7133 memcg = root_mem_cgroup;
7134 }
7135 return memcg;
7136}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (mem_cgroup_disabled())
		return;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = thp_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->memcg_data = 0;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (!cgroup_memory_noswap && memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
	memcg_check_events(memcg, page);

	css_put(&memcg->css);
}
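
/*
 * A minimal sketch of the locking contract assumed by mem_cgroup_swapout()
 * (illustrative only, not an actual call site in this file): the
 * reclaim-side caller already holds the i_pages lock, which disables
 * interrupts, when it transfers the charge:
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	mem_cgroup_swapout(page, entry);
 *	... remove the page from the swap cache ...
 *	xa_unlock_irq(&mapping->i_pages);
 */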

/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = thp_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (mem_cgroup_disabled())
		return 0;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}
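
/*
 * Illustrative caller pattern for mem_cgroup_try_charge_swap() (a sketch
 * only; "alloc_swap_slot" is a hypothetical placeholder for the swap slot
 * allocation, not a helper in this file):
 *
 *	swp_entry_t entry = alloc_swap_slot(page);
 *
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		put_swap_page(page, entry);	// -ENOMEM: give the slot back
 *		entry.val = 0;			// and fail the swap-out
 *	}
 */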

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}
7283
Vladimir Davydovd8b38432016-01-20 15:03:07 -08007284long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7285{
7286 long nr_swap_pages = get_nr_swap_pages();
7287
Johannes Weinereccb52e2020-06-03 16:02:11 -07007288 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
Vladimir Davydovd8b38432016-01-20 15:03:07 -08007289 return nr_swap_pages;
7290 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7291 nr_swap_pages = min_t(long, nr_swap_pages,
Roman Gushchinbbec2e12018-06-07 17:06:18 -07007292 READ_ONCE(memcg->swap.max) -
Vladimir Davydovd8b38432016-01-20 15:03:07 -08007293 page_counter_read(&memcg->swap));
7294 return nr_swap_pages;
7295}
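
/*
 * Worked example with illustrative numbers: given 4G of free swap
 * system-wide, a memcg with swap.max = 1G and 256M of swap already charged,
 * and an unrestricted parent, the walk above returns the page equivalent of
 * min(4G, 1G - 256M) = 768M.
 */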

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page_memcg(page);
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}
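
/*
 * Illustrative numbers for the check above: with swap.max = 512M and
 * swap.high left at "max", the memcg's swap is considered full once usage
 * reaches 256M, i.e. as soon as usage * 2 >= max; the same halfway rule
 * applies against swap.high when it is set.
 */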

static int __init setup_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		cgroup_memory_noswap = false;
	else if (!strcmp(s, "0"))
		cgroup_memory_noswap = true;
	return 1;
}
__setup("swapaccount=", setup_swap_account);
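
/*
 * Usage example for the boot parameter parsed above: "swapaccount=0"
 * disables memcg swap accounting, "swapaccount=1" enables it.
 */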

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}
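
/*
 * Illustrative cgroup v2 usage of the interface files backed by the
 * handlers above (the cgroup path is an example; values accept a byte
 * count with an optional K/M/G suffix, or "max"):
 *
 *	# echo 1G > /sys/fs/cgroup/mygroup/memory.swap.high
 *	# echo 2G > /sys/fs/cgroup/mygroup/memory.swap.max
 *	# cat /sys/fs/cgroup/mygroup/memory.swap.current
 */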

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};

static struct cftype memsw_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};
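
/*
 * Note: on the legacy (cgroup v1) hierarchy these entries appear with the
 * controller prefix, e.g. memory.memsw.usage_in_bytes and
 * memory.memsw.limit_in_bytes.
 */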

/*
 * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
 * instead of a core_initcall(), cgroup_memory_noswap could still remain
 * set to false even when memcg is disabled via the "cgroup_disable=memory"
 * boot parameter. This may result in a premature oops inside the
 * mem_cgroup_get_nr_swap_pages() function in corner cases.
 */
static int __init mem_cgroup_swap_init(void)
{
	/* No memory control -> no swap control */
	if (mem_cgroup_disabled())
		cgroup_memory_noswap = true;

	if (cgroup_memory_noswap)
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));

	return 0;
}
core_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */