/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

#else
#define do_swap_account		0
#endif


static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"writeback",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	int last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;

	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	oom_wakeups;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

	atomic_t	dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list, but per-memcg;
	 * protected by memcg_slab_mutex */
	struct list_head memcg_slab_caches;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};

#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	/*
	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
	 * will call css_put() if it sees the memcg is dead.
	 */
	smp_wmb();
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size. It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in the kmem destruction function,
	 * since the charges will outlive the cgroup.
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}

static void drain_all_stock_async(struct mem_cgroup *memcg);

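/* Return @memcg's per-zone info for the node/zone that @zone belongs to. */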
static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

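/* Look up the soft-limit RB-tree for a given node/zone, or for @page's zone. */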
static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

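/*
 * Link @mz into the soft-limit tree @mctz, ordered by how far the group's
 * usage exceeds its soft limit. Nothing is done if @mz is already on the
 * tree or if the new excess is zero. Callers hold mctz->lock.
 */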
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock(&mctz->lock);
}


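/*
 * Re-position @memcg and all of its ancestors in the soft-limit tree of
 * @page's zone to reflect their current excess over the soft limit.
 */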
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when the hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value, so we could implement a similar
 * periodic synchronization for memcg's counters.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value for that accounting. Even if we provided a quick-and-fuzzy read,
 * we would still have to visit all online cpus and compute the sum. So,
 * for now, the extra synchronization is not implemented (it is only
 * implemented for cpu hotplug).
 *
 * If kernel-internal users can make use of an inexact value, and reading
 * all cpu values becomes a performance bottleneck in some common workload,
 * thresholds and synchronization as in vmstat[] should be implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
				       bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

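/*
 * Update the per-cpu RSS/CACHE (and RSS_HUGE for THP) counters and the
 * pgpgin/pgpgout event counts for a charge of @nr_pages pages; @nr_pages
 * is negative on uncharge.
 */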
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

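/* Sum the sizes of the LRU lists selected by @lru_mask for @memcg on node @nid. */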
static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

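/*
 * Return true when the page-event counter has moved past the next firing
 * point for @target, and advance that firing point for the next check.
 */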
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}

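/*
 * Return the memcg that @mm's owner belongs to (or root_mem_cgroup when
 * there is no mm or no memcg), with its css reference count elevated.
 */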
static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/*
 * Returns a next (in a pre-order walk) alive memcg (with elevated css
 * ref. count) or NULL if the whole root's subtree has been visited.
 *
 * helper function to be used by mem_cgroup_iter
 */
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
		struct mem_cgroup *last_visited)
{
	struct cgroup_subsys_state *prev_css, *next_css;

	prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
	next_css = css_next_descendant_pre(prev_css, &root->css);

	/*
	 * Even if we found a group we have to make sure it is
	 * alive. css && !memcg means that the groups should be
	 * skipped and we should continue the tree walk.
	 * last_visited css is safe to use because it is
	 * protected by css_get and the tree walk is rcu safe.
	 *
	 * We do not take a reference on the root of the tree walk
	 * because we might race with the root removal when it would
	 * be the only node in the iterated hierarchy and mem_cgroup_iter
	 * would end up in an endless loop because it expects that at
	 * least one valid node will be returned. Root cannot disappear
	 * because caller of the iterator should hold it already so
	 * skipping css reference should be safe.
	 */
	if (next_css) {
		if ((next_css == &root->css) ||
		    ((next_css->flags & CSS_ONLINE) &&
		     css_tryget_online(next_css)))
			return mem_cgroup_from_css(next_css);

		prev_css = next_css;
		goto skip_node;
	}

	return NULL;
}

static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
{
	/*
	 * When a group in the hierarchy below root is destroyed, the
	 * hierarchy iterator can no longer be trusted since it might
	 * have pointed to the destroyed group. Invalidate it.
	 */
	atomic_inc(&root->dead_count);
}

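/*
 * Load the last visited memcg of a shared reclaim iterator, validating it
 * against the hierarchy root's dead_count and taking a css reference on
 * it; returns NULL when the cached position has gone stale.
 */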
static struct mem_cgroup *
mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
		     struct mem_cgroup *root,
		     int *sequence)
{
	struct mem_cgroup *position = NULL;
	/*
	 * A cgroup destruction happens in two stages: offlining and
	 * release.  They are separated by a RCU grace period.
	 *
	 * If the iterator is valid, we may still race with an
	 * offlining.  The RCU lock ensures the object won't be
	 * released, tryget will fail if we lost the race.
	 */
	*sequence = atomic_read(&root->dead_count);
	if (iter->last_dead_count == *sequence) {
		smp_rmb();
		position = iter->last_visited;

		/*
		 * We cannot take a reference to root because we might race
		 * with root removal and returning NULL would end up in
		 * an endless loop on the iterator user level when root
		 * would be returned all the time.
		 */
		if (position && position != root &&
		    !css_tryget_online(&position->css))
			position = NULL;
	}
	return position;
}

static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
				   struct mem_cgroup *last_visited,
				   struct mem_cgroup *new_position,
				   struct mem_cgroup *root,
				   int sequence)
{
	/* root reference counting symmetric to mem_cgroup_iter_load */
	if (last_visited && last_visited != root)
		css_put(&last_visited->css);
	/*
	 * We store the sequence count from the time @last_visited was
	 * loaded successfully instead of rereading it here so that we
	 * don't lose destruction events in between.  We could have
	 * raced with the destruction of @new_position after all.
	 */
	iter->last_visited = new_position;
	smp_wmb();
	iter->last_dead_count = sequence;
}

Johannes Weiner56600482012-01-12 17:17:59 -08001183/**
1184 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1185 * @root: hierarchy root
1186 * @prev: previously returned memcg, NULL on first invocation
1187 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1188 *
1189 * Returns references to children of the hierarchy below @root, or
1190 * @root itself, or %NULL after a full round-trip.
1191 *
1192 * Caller must pass the return value in @prev on subsequent
1193 * invocations for reference counting, or use mem_cgroup_iter_break()
1194 * to cancel a hierarchy walk before the round-trip is complete.
1195 *
1196 * Reclaimers can specify a zone and a priority level in @reclaim to
1197 * divide up the memcgs in the hierarchy among all concurrent
1198 * reclaimers operating on the same zone and priority.
1199 */
Andrew Morton694fbc02013-09-24 15:27:37 -07001200struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
Johannes Weiner56600482012-01-12 17:17:59 -08001201 struct mem_cgroup *prev,
Andrew Morton694fbc02013-09-24 15:27:37 -07001202 struct mem_cgroup_reclaim_cookie *reclaim)
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07001203{
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001204 struct mem_cgroup *memcg = NULL;
Michal Hocko542f85f2013-04-29 15:07:15 -07001205 struct mem_cgroup *last_visited = NULL;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001206
Andrew Morton694fbc02013-09-24 15:27:37 -07001207 if (mem_cgroup_disabled())
1208 return NULL;
Johannes Weiner56600482012-01-12 17:17:59 -08001209
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001210 if (!root)
1211 root = root_mem_cgroup;
1212
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001213 if (prev && !reclaim)
Michal Hocko542f85f2013-04-29 15:07:15 -07001214 last_visited = prev;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001215
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001216 if (!root->use_hierarchy && root != root_mem_cgroup) {
1217 if (prev)
Michal Hockoc40046f2013-04-29 15:07:14 -07001218 goto out_css_put;
Andrew Morton694fbc02013-09-24 15:27:37 -07001219 return root;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001220 }
1221
Michal Hocko542f85f2013-04-29 15:07:15 -07001222 rcu_read_lock();
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001223 while (!memcg) {
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001224 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
Johannes Weiner519ebea2013-07-03 15:04:51 -07001225 int uninitialized_var(seq);
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001226
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001227 if (reclaim) {
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001228 struct mem_cgroup_per_zone *mz;
1229
Jianyu Zhane2318752014-06-06 14:38:20 -07001230 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001231 iter = &mz->reclaim_iter[reclaim->priority];
Michal Hocko542f85f2013-04-29 15:07:15 -07001232 if (prev && reclaim->generation != iter->generation) {
Michal Hocko5f578162013-04-29 15:07:17 -07001233 iter->last_visited = NULL;
Michal Hocko542f85f2013-04-29 15:07:15 -07001234 goto out_unlock;
1235 }
Michal Hocko5f578162013-04-29 15:07:17 -07001236
Johannes Weiner519ebea2013-07-03 15:04:51 -07001237 last_visited = mem_cgroup_iter_load(iter, root, &seq);
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001238 }
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001239
Andrew Morton694fbc02013-09-24 15:27:37 -07001240 memcg = __mem_cgroup_iter_next(root, last_visited);
Michal Hocko542f85f2013-04-29 15:07:15 -07001241
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001242 if (reclaim) {
Michal Hockoecc736f2014-01-23 15:53:35 -08001243 mem_cgroup_iter_update(iter, last_visited, memcg, root,
1244 seq);
Michal Hocko542f85f2013-04-29 15:07:15 -07001245
Michal Hocko19f39402013-04-29 15:07:18 -07001246 if (!memcg)
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001247 iter->generation++;
1248 else if (!prev && memcg)
1249 reclaim->generation = iter->generation;
1250 }
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001251
Andrew Morton694fbc02013-09-24 15:27:37 -07001252 if (prev && !memcg)
Michal Hocko542f85f2013-04-29 15:07:15 -07001253 goto out_unlock;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001254 }
Michal Hocko542f85f2013-04-29 15:07:15 -07001255out_unlock:
1256 rcu_read_unlock();
Michal Hockoc40046f2013-04-29 15:07:14 -07001257out_css_put:
1258 if (prev && prev != root)
1259 css_put(&prev->css);
1260
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001261 return memcg;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001262}
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001263
Johannes Weiner56600482012-01-12 17:17:59 -08001264/**
1265 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1266 * @root: hierarchy root
1267 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1268 */
1269void mem_cgroup_iter_break(struct mem_cgroup *root,
1270 struct mem_cgroup *prev)
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001271{
1272 if (!root)
1273 root = root_mem_cgroup;
1274 if (prev && prev != root)
1275 css_put(&prev->css);
1276}
1277
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001278/*
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001279 * Iteration constructs for visiting all cgroups (under a tree). If
1280 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1281 * be used for reference counting.
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001282 */
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001283#define for_each_mem_cgroup_tree(iter, root) \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001284 for (iter = mem_cgroup_iter(root, NULL, NULL); \
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001285 iter != NULL; \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001286 iter = mem_cgroup_iter(root, iter, NULL))
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001287
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001288#define for_each_mem_cgroup(iter) \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001289 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001290 iter != NULL; \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001291 iter = mem_cgroup_iter(NULL, iter, NULL))
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001292
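/*
 * Example (illustrative sketch, not a real caller): a full-tree walk can
 * simply use for_each_mem_cgroup_tree() above.  A walk that bails out
 * early must pair the break with mem_cgroup_iter_break() so that the css
 * reference held on the last visited group is dropped.  some_condition()
 * is a placeholder for whatever the caller is looking for.
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
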
David Rientjes68ae5642012-12-12 13:51:57 -08001293void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
Ying Han456f9982011-05-26 16:25:38 -07001294{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001295 struct mem_cgroup *memcg;
Ying Han456f9982011-05-26 16:25:38 -07001296
Ying Han456f9982011-05-26 16:25:38 -07001297 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001298 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1299 if (unlikely(!memcg))
Ying Han456f9982011-05-26 16:25:38 -07001300 goto out;
1301
1302 switch (idx) {
Ying Han456f9982011-05-26 16:25:38 -07001303 case PGFAULT:
Johannes Weiner0e574a92012-01-12 17:18:35 -08001304 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1305 break;
1306 case PGMAJFAULT:
1307 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
Ying Han456f9982011-05-26 16:25:38 -07001308 break;
1309 default:
1310 BUG();
1311 }
1312out:
1313 rcu_read_unlock();
1314}
David Rientjes68ae5642012-12-12 13:51:57 -08001315EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
Ying Han456f9982011-05-26 16:25:38 -07001316
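/*
 * Example (rough sketch of a caller, simplified from the major-fault
 * accounting path): both the global and the per-memcg event counter get
 * bumped.  The mem_cgroup_count_vm_event() wrapper in the header is
 * assumed to filter out the mem_cgroup_disabled() case before calling the
 * function above.
 *
 *	count_vm_event(PGMAJFAULT);
 *	mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 */
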
Johannes Weiner925b7672012-01-12 17:18:15 -08001317/**
1318 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1319 * @zone: zone of the wanted lruvec
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001320 * @memcg: memcg of the wanted lruvec
Johannes Weiner925b7672012-01-12 17:18:15 -08001321 *
1322 * Returns the lru list vector holding pages for the given @zone and
 1323	 * @memcg. This can be the global zone lruvec, if the memory controller
1324 * is disabled.
1325 */
1326struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1327 struct mem_cgroup *memcg)
1328{
1329 struct mem_cgroup_per_zone *mz;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001330 struct lruvec *lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001331
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001332 if (mem_cgroup_disabled()) {
1333 lruvec = &zone->lruvec;
1334 goto out;
1335 }
Johannes Weiner925b7672012-01-12 17:18:15 -08001336
Jianyu Zhane2318752014-06-06 14:38:20 -07001337 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001338 lruvec = &mz->lruvec;
1339out:
1340 /*
1341 * Since a node can be onlined after the mem_cgroup was created,
1342 * we have to be prepared to initialize lruvec->zone here;
1343 * and if offlined then reonlined, we need to reinitialize it.
1344 */
1345 if (unlikely(lruvec->zone != zone))
1346 lruvec->zone = zone;
1347 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001348}
1349
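/*
 * Example (simplified sketch of a reclaim-side caller): a per-memcg
 * reclaim pass pairs mem_cgroup_iter() with this lookup to find the
 * lruvec it should shrink.  The scan_control setup and the actual
 * shrinking of the real reclaim code are elided.
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = {
 *		.zone = zone,
 *		.priority = priority,
 *	};
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *
 *	do {
 *		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 *
 *		... shrink the LRU lists hanging off lruvec ...
 *
 *		memcg = mem_cgroup_iter(root, memcg, &reclaim);
 *	} while (memcg);
 */
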
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001350/*
 1351	 * The following LRU functions may be used without holding PCG_LOCK.
 1352	 * They are called by the global LRU code independently of memcg.
 1353	 * What we have to take care of here is the validity of pc->mem_cgroup.
 1354	 *
 1355	 * pc->mem_cgroup changes on
 1356	 * 1. charge
 1357	 * 2. moving account
 1358	 * In the typical case, "charge" is done before add-to-lru. The exception is
 1359	 * SwapCache, which is added to the LRU before being charged.
 1360	 * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
 1361	 * When moving an account, the page is not on the LRU; it is isolated.
1362 */
1363
Johannes Weiner925b7672012-01-12 17:18:15 -08001364/**
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001365 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
Johannes Weiner925b7672012-01-12 17:18:15 -08001366 * @page: the page
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001367 * @zone: zone of the page
Minchan Kim3f58a822011-03-22 16:32:53 -07001368 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001369struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
Minchan Kim3f58a822011-03-22 16:32:53 -07001370{
1371 struct mem_cgroup_per_zone *mz;
Johannes Weiner925b7672012-01-12 17:18:15 -08001372 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001373 struct page_cgroup *pc;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001374 struct lruvec *lruvec;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001375
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001376 if (mem_cgroup_disabled()) {
1377 lruvec = &zone->lruvec;
1378 goto out;
1379 }
Christoph Lameterb69408e2008-10-18 20:26:14 -07001380
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001381 pc = lookup_page_cgroup(page);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08001382 memcg = pc->mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001383
1384 /*
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001385 * Surreptitiously switch any uncharged offlist page to root:
Hugh Dickins75121022012-03-05 14:59:18 -08001386 * an uncharged page off lru does nothing to secure
1387 * its former mem_cgroup from sudden removal.
1388 *
1389 * Our caller holds lru_lock, and PageCgroupUsed is updated
1390 * under page_cgroup lock: between them, they make all uses
1391 * of pc->mem_cgroup safe.
1392 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001393 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
Hugh Dickins75121022012-03-05 14:59:18 -08001394 pc->mem_cgroup = memcg = root_mem_cgroup;
1395
Jianyu Zhane2318752014-06-06 14:38:20 -07001396 mz = mem_cgroup_page_zoneinfo(memcg, page);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001397 lruvec = &mz->lruvec;
1398out:
1399 /*
1400 * Since a node can be onlined after the mem_cgroup was created,
1401 * we have to be prepared to initialize lruvec->zone here;
1402 * and if offlined then reonlined, we need to reinitialize it.
1403 */
1404 if (unlikely(lruvec->zone != zone))
1405 lruvec->zone = zone;
1406 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001407}
1408
1409/**
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001410 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1411 * @lruvec: mem_cgroup per zone lru vector
1412 * @lru: index of lru list the page is sitting on
1413 * @nr_pages: positive when adding or negative when removing
Johannes Weiner925b7672012-01-12 17:18:15 -08001414 *
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001415 * This function must be called when a page is added to or removed from an
1416 * lru list.
Johannes Weiner925b7672012-01-12 17:18:15 -08001417 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001418void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1419 int nr_pages)
Johannes Weiner925b7672012-01-12 17:18:15 -08001420{
1421 struct mem_cgroup_per_zone *mz;
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001422 unsigned long *lru_size;
Johannes Weiner925b7672012-01-12 17:18:15 -08001423
1424 if (mem_cgroup_disabled())
1425 return;
1426
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001427 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1428 lru_size = mz->lru_size + lru;
1429 *lru_size += nr_pages;
1430 VM_BUG_ON((long)(*lru_size) < 0);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001431}
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001432
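/*
 * Example (sketch of the usual caller pattern, details elided): LRU
 * manipulation under zone->lru_lock looks up the page's lruvec and keeps
 * the per-memcg size counter in sync with the list operation.  The zone
 * page state update done by the real helpers is omitted here;
 * hpage_nr_pages() accounts a THP as more than one page.
 *
 *	spin_lock_irq(&zone->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, zone);
 *	mem_cgroup_update_lru_size(lruvec, lru, hpage_nr_pages(page));
 *	list_add(&page->lru, &lruvec->lists[lru]);
 *	spin_unlock_irq(&zone->lru_lock);
 */
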
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001433/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001434 * Checks whether the given memcg is the same as root_memcg or lies in
Michal Hocko3e920412011-07-26 16:08:29 -07001435 * root_memcg's hierarchy subtree
1436 */
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001437bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1438 struct mem_cgroup *memcg)
Michal Hocko3e920412011-07-26 16:08:29 -07001439{
Johannes Weiner91c637342012-05-29 15:06:24 -07001440 if (root_memcg == memcg)
1441 return true;
Hugh Dickins3a981f42012-06-20 12:52:58 -07001442 if (!root_memcg->use_hierarchy || !memcg)
Johannes Weiner91c637342012-05-29 15:06:24 -07001443 return false;
Li Zefanb47f77b2013-09-23 16:55:43 +08001444 return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001445}
1446
1447static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1448 struct mem_cgroup *memcg)
1449{
1450 bool ret;
1451
Johannes Weiner91c637342012-05-29 15:06:24 -07001452 rcu_read_lock();
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001453 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
Johannes Weiner91c637342012-05-29 15:06:24 -07001454 rcu_read_unlock();
1455 return ret;
Michal Hocko3e920412011-07-26 16:08:29 -07001456}
1457
David Rientjesffbdccf2013-07-03 15:01:23 -07001458bool task_in_mem_cgroup(struct task_struct *task,
1459 const struct mem_cgroup *memcg)
David Rientjes4c4a2212008-02-07 00:14:06 -08001460{
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001461 struct mem_cgroup *curr = NULL;
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001462 struct task_struct *p;
David Rientjesffbdccf2013-07-03 15:01:23 -07001463 bool ret;
David Rientjes4c4a2212008-02-07 00:14:06 -08001464
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001465 p = find_lock_task_mm(task);
David Rientjesde077d22012-01-12 17:18:52 -08001466 if (p) {
Johannes Weinerdf381972014-04-07 15:37:43 -07001467 curr = get_mem_cgroup_from_mm(p->mm);
David Rientjesde077d22012-01-12 17:18:52 -08001468 task_unlock(p);
1469 } else {
1470 /*
1471 * All threads may have already detached their mm's, but the oom
1472 * killer still needs to detect if they have already been oom
1473 * killed to prevent needlessly killing additional tasks.
1474 */
David Rientjesffbdccf2013-07-03 15:01:23 -07001475 rcu_read_lock();
David Rientjesde077d22012-01-12 17:18:52 -08001476 curr = mem_cgroup_from_task(task);
1477 if (curr)
1478 css_get(&curr->css);
David Rientjesffbdccf2013-07-03 15:01:23 -07001479 rcu_read_unlock();
David Rientjesde077d22012-01-12 17:18:52 -08001480 }
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001481 /*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001482	 * We should check use_hierarchy of "memcg", not "curr". Checking
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001483	 * use_hierarchy of "curr" here would make this function return true if
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001484	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
 1485	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001486 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001487 ret = mem_cgroup_same_or_subtree(memcg, curr);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001488 css_put(&curr->css);
David Rientjes4c4a2212008-02-07 00:14:06 -08001489 return ret;
1490}
1491
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001492int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001493{
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001494 unsigned long inactive_ratio;
Johannes Weiner9b272972011-11-02 13:38:23 -07001495 unsigned long inactive;
1496 unsigned long active;
1497 unsigned long gb;
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001498
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001499 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1500 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001501
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001502 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1503 if (gb)
1504 inactive_ratio = int_sqrt(10 * gb);
1505 else
1506 inactive_ratio = 1;
1507
Johannes Weiner9b272972011-11-02 13:38:23 -07001508 return inactive * inactive_ratio < active;
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001509}
1510
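/*
 * Worked example of the ratio above: with 4GB of anon pages in the memcg,
 * gb = 4 and inactive_ratio = int_sqrt(40) = 6, so the inactive list is
 * reported as low once it drops below roughly 1/7 of the anon pages.
 * Below 1GB the ratio falls back to 1, i.e. inactive is low only when it
 * is smaller than active.
 */
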
Balbir Singh6d61ef42009-01-07 18:08:06 -08001511#define mem_cgroup_from_res_counter(counter, member) \
1512 container_of(counter, struct mem_cgroup, member)
1513
Johannes Weiner19942822011-02-01 15:52:43 -08001514/**
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001515 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
Wanpeng Lidad75572012-06-20 12:53:01 -07001516 * @memcg: the memory cgroup
Johannes Weiner19942822011-02-01 15:52:43 -08001517 *
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001518 * Returns the maximum amount of memory @memcg can be charged with, in
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001519 * pages.
Johannes Weiner19942822011-02-01 15:52:43 -08001520 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001521static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
Johannes Weiner19942822011-02-01 15:52:43 -08001522{
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001523 unsigned long long margin;
1524
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001525 margin = res_counter_margin(&memcg->res);
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001526 if (do_swap_account)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001527 margin = min(margin, res_counter_margin(&memcg->memsw));
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001528 return margin >> PAGE_SHIFT;
Johannes Weiner19942822011-02-01 15:52:43 -08001529}
1530
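/*
 * Worked example: with a 512MB hard limit and 508MB of usage the margin
 * is 4MB, i.e. 1024 pages with 4KB pages.  With swap accounting enabled,
 * the tighter of the two counters (res vs. memsw) determines the result.
 */
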
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001531int mem_cgroup_swappiness(struct mem_cgroup *memcg)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001532{
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001533 /* root ? */
Linus Torvalds14208b02014-06-09 15:03:33 -07001534 if (mem_cgroup_disabled() || !memcg->css.parent)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001535 return vm_swappiness;
1536
Johannes Weinerbf1ff262011-03-23 16:42:32 -07001537 return memcg->swappiness;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001538}
1539
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001540/*
 1541	 * memcg->moving_account is used to check the possibility that some thread is
1542 * calling move_account(). When a thread on CPU-A starts moving pages under
1543 * a memcg, other threads should check memcg->moving_account under
1544 * rcu_read_lock(), like this:
1545 *
1546 * CPU-A CPU-B
1547 * rcu_read_lock()
 1548	 * memcg->moving_account+1 if (memcg->moving_account)
1549 * take heavy locks.
1550 * synchronize_rcu() update something.
1551 * rcu_read_unlock()
1552 * start move here.
1553 */
KAMEZAWA Hiroyuki4331f7d2012-03-21 16:34:26 -07001554
1555/* for quick checking without looking up memcg */
1556atomic_t memcg_moving __read_mostly;
1557
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001558static void mem_cgroup_start_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001559{
KAMEZAWA Hiroyuki4331f7d2012-03-21 16:34:26 -07001560 atomic_inc(&memcg_moving);
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001561 atomic_inc(&memcg->moving_account);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001562 synchronize_rcu();
1563}
1564
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001565static void mem_cgroup_end_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001566{
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001567 /*
1568 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1569 * We check NULL in callee rather than caller.
1570 */
KAMEZAWA Hiroyuki4331f7d2012-03-21 16:34:26 -07001571 if (memcg) {
1572 atomic_dec(&memcg_moving);
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001573 atomic_dec(&memcg->moving_account);
KAMEZAWA Hiroyuki4331f7d2012-03-21 16:34:26 -07001574 }
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001575}
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001576
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001577/*
Qiang Huangbdcbb652014-06-04 16:08:21 -07001578 * A routine for checking whether "mem" is under move_account() or not.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001579 *
Qiang Huangbdcbb652014-06-04 16:08:21 -07001580 * It checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 1581 * a moving cgroup. This is used for waiting at high memory pressure
 1582 * caused by "move".
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001583 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001584static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001585{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001586 struct mem_cgroup *from;
1587 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001588 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001589 /*
1590 * Unlike task_move routines, we access mc.to, mc.from not under
1591 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1592 */
1593 spin_lock(&mc.lock);
1594 from = mc.from;
1595 to = mc.to;
1596 if (!from)
1597 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001598
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001599 ret = mem_cgroup_same_or_subtree(memcg, from)
1600 || mem_cgroup_same_or_subtree(memcg, to);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001601unlock:
1602 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001603 return ret;
1604}
1605
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001606static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001607{
1608 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001609 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001610 DEFINE_WAIT(wait);
1611 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1612 /* moving charge context might have finished. */
1613 if (mc.moving_task)
1614 schedule();
1615 finish_wait(&mc.waitq, &wait);
1616 return true;
1617 }
1618 }
1619 return false;
1620}
1621
KAMEZAWA Hiroyuki312734c02012-03-21 16:34:24 -07001622/*
1623 * Take this lock when
 1624	 * - code tries to modify a page's memcg while it is USED.
 1625	 * - code tries to modify page state accounting in a memcg.
KAMEZAWA Hiroyuki312734c02012-03-21 16:34:24 -07001626 */
1627static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1628 unsigned long *flags)
1629{
1630 spin_lock_irqsave(&memcg->move_lock, *flags);
1631}
1632
1633static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1634 unsigned long *flags)
1635{
1636 spin_unlock_irqrestore(&memcg->move_lock, *flags);
1637}
1638
Sha Zhengju58cf1882013-02-22 16:32:05 -08001639#define K(x) ((x) << (PAGE_SHIFT-10))
Balbir Singhe2224322009-04-02 16:57:39 -07001640/**
Sha Zhengju58cf1882013-02-22 16:32:05 -08001641 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001642 * @memcg: The memory cgroup that went over limit
1643 * @p: Task that is going to be killed
1644 *
1645 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1646 * enabled
1647 */
1648void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1649{
Tejun Heoe61734c2014-02-12 09:29:50 -05001650 /* oom_info_lock ensures that parallel ooms do not interleave */
Michal Hocko08088cb2014-02-25 15:01:44 -08001651 static DEFINE_MUTEX(oom_info_lock);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001652 struct mem_cgroup *iter;
1653 unsigned int i;
Balbir Singhe2224322009-04-02 16:57:39 -07001654
Sha Zhengju58cf1882013-02-22 16:32:05 -08001655 if (!p)
Balbir Singhe2224322009-04-02 16:57:39 -07001656 return;
1657
Michal Hocko08088cb2014-02-25 15:01:44 -08001658 mutex_lock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001659 rcu_read_lock();
1660
Tejun Heoe61734c2014-02-12 09:29:50 -05001661 pr_info("Task in ");
1662 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1663 pr_info(" killed as a result of limit of ");
1664 pr_cont_cgroup_path(memcg->css.cgroup);
1665 pr_info("\n");
Balbir Singhe2224322009-04-02 16:57:39 -07001666
Balbir Singhe2224322009-04-02 16:57:39 -07001667 rcu_read_unlock();
1668
Andrew Mortond0451972013-02-22 16:32:06 -08001669 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
Balbir Singhe2224322009-04-02 16:57:39 -07001670 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1671 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1672 res_counter_read_u64(&memcg->res, RES_FAILCNT));
Andrew Mortond0451972013-02-22 16:32:06 -08001673 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
Balbir Singhe2224322009-04-02 16:57:39 -07001674 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1675 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1676 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
Andrew Mortond0451972013-02-22 16:32:06 -08001677 pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
Glauber Costa510fc4e2012-12-18 14:21:47 -08001678 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1679 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1680 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
Sha Zhengju58cf1882013-02-22 16:32:05 -08001681
1682 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heoe61734c2014-02-12 09:29:50 -05001683 pr_info("Memory cgroup stats for ");
1684 pr_cont_cgroup_path(iter->css.cgroup);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001685 pr_cont(":");
1686
1687 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1688 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1689 continue;
1690 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1691 K(mem_cgroup_read_stat(iter, i)));
1692 }
1693
1694 for (i = 0; i < NR_LRU_LISTS; i++)
1695 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1696 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1697
1698 pr_cont("\n");
1699 }
Michal Hocko08088cb2014-02-25 15:01:44 -08001700 mutex_unlock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001701}
1702
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001703/*
 1704	 * This function returns the number of memcgs in the hierarchy tree. Returns
 1705	 * 1 (self count) if there are no children.
1706 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001707static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001708{
1709 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001710 struct mem_cgroup *iter;
1711
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001712 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001713 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001714 return num;
1715}
1716
Balbir Singh6d61ef42009-01-07 18:08:06 -08001717/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001718 * Return the memory (and swap, if configured) limit for a memcg.
1719 */
David Rientjes9cbb78b2012-07-31 16:43:44 -07001720static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001721{
1722 u64 limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001723
Johannes Weinerf3e8eb72011-01-13 15:47:39 -08001724 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
Johannes Weinerf3e8eb72011-01-13 15:47:39 -08001725
David Rientjesa63d83f2010-08-09 17:19:46 -07001726 /*
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001727 * Do not consider swap space if we cannot swap due to swappiness
David Rientjesa63d83f2010-08-09 17:19:46 -07001728 */
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001729 if (mem_cgroup_swappiness(memcg)) {
1730 u64 memsw;
1731
1732 limit += total_swap_pages << PAGE_SHIFT;
1733 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1734
1735 /*
1736 * If memsw is finite and limits the amount of swap space
1737 * available to this memcg, return that limit.
1738 */
1739 limit = min(limit, memsw);
1740 }
1741
1742 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001743}
1744
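/*
 * Worked example: with a 2GB memory limit, 1GB of total swap and an
 * effectively unlimited memsw counter, the returned limit is 3GB; if
 * memsw were capped at 2.5GB, that cap would win instead.  With
 * swappiness 0 the swap space is ignored and the result stays at 2GB.
 */
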
David Rientjes19965462012-12-11 16:00:26 -08001745static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1746 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001747{
1748 struct mem_cgroup *iter;
1749 unsigned long chosen_points = 0;
1750 unsigned long totalpages;
1751 unsigned int points = 0;
1752 struct task_struct *chosen = NULL;
1753
David Rientjes876aafb2012-07-31 16:43:48 -07001754 /*
David Rientjes465adcf2013-04-29 15:08:45 -07001755 * If current has a pending SIGKILL or is exiting, then automatically
1756 * select it. The goal is to allow it to allocate so that it may
1757 * quickly exit and free its memory.
David Rientjes876aafb2012-07-31 16:43:48 -07001758 */
David Rientjes465adcf2013-04-29 15:08:45 -07001759 if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
David Rientjes876aafb2012-07-31 16:43:48 -07001760 set_thread_flag(TIF_MEMDIE);
1761 return;
1762 }
1763
1764 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001765 totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1766 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heo72ec7022013-08-08 20:11:26 -04001767 struct css_task_iter it;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001768 struct task_struct *task;
1769
Tejun Heo72ec7022013-08-08 20:11:26 -04001770 css_task_iter_start(&iter->css, &it);
1771 while ((task = css_task_iter_next(&it))) {
David Rientjes9cbb78b2012-07-31 16:43:44 -07001772 switch (oom_scan_process_thread(task, totalpages, NULL,
1773 false)) {
1774 case OOM_SCAN_SELECT:
1775 if (chosen)
1776 put_task_struct(chosen);
1777 chosen = task;
1778 chosen_points = ULONG_MAX;
1779 get_task_struct(chosen);
1780 /* fall through */
1781 case OOM_SCAN_CONTINUE:
1782 continue;
1783 case OOM_SCAN_ABORT:
Tejun Heo72ec7022013-08-08 20:11:26 -04001784 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001785 mem_cgroup_iter_break(memcg, iter);
1786 if (chosen)
1787 put_task_struct(chosen);
1788 return;
1789 case OOM_SCAN_OK:
1790 break;
1791 };
1792 points = oom_badness(task, memcg, NULL, totalpages);
David Rientjesd49ad932014-01-23 15:53:34 -08001793 if (!points || points < chosen_points)
1794 continue;
1795 /* Prefer thread group leaders for display purposes */
1796 if (points == chosen_points &&
1797 thread_group_leader(chosen))
1798 continue;
1799
1800 if (chosen)
1801 put_task_struct(chosen);
1802 chosen = task;
1803 chosen_points = points;
1804 get_task_struct(chosen);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001805 }
Tejun Heo72ec7022013-08-08 20:11:26 -04001806 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001807 }
1808
1809 if (!chosen)
1810 return;
1811 points = chosen_points * 1000 / totalpages;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001812 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1813 NULL, "Memory cgroup out of memory");
David Rientjes9cbb78b2012-07-31 16:43:44 -07001814}
1815
Johannes Weiner56600482012-01-12 17:17:59 -08001816static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1817 gfp_t gfp_mask,
1818 unsigned long flags)
1819{
1820 unsigned long total = 0;
1821 bool noswap = false;
1822 int loop;
1823
1824 if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1825 noswap = true;
1826 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1827 noswap = true;
1828
1829 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1830 if (loop)
1831 drain_all_stock_async(memcg);
1832 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1833 /*
1834 * Allow limit shrinkers, which are triggered directly
1835 * by userspace, to catch signals and stop reclaim
1836 * after minimal progress, regardless of the margin.
1837 */
1838 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1839 break;
1840 if (mem_cgroup_margin(memcg))
1841 break;
1842 /*
1843 * If nothing was reclaimed after two attempts, there
1844 * may be no reclaimable pages in this hierarchy.
1845 */
1846 if (loop && !total)
1847 break;
1848 }
1849 return total;
1850}
1851
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001852/**
1853 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001854 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001855 * @nid: the node ID to be checked.
 1856	 * @noswap: specify true here if the user wants file-only information.
1857 *
1858 * This function returns whether the specified memcg contains any
1859 * reclaimable pages on a node. Returns true if there are any reclaimable
1860 * pages in the node.
1861 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001862static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001863 int nid, bool noswap)
1864{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001865 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001866 return true;
1867 if (noswap || !total_swap_pages)
1868 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001869 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001870 return true;
1871 return false;
1872
1873}
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07001874#if MAX_NUMNODES > 1
Ying Han889976d2011-05-26 16:25:33 -07001875
1876/*
1877 * Always updating the nodemask is not very good - even if we have an empty
1878 * list or the wrong list here, we can start from some node and traverse all
1879 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1880 *
1881 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001882static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001883{
1884 int nid;
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001885 /*
 1886	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1887 * pagein/pageout changes since the last update.
1888 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001889 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001890 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001891 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001892 return;
1893
Ying Han889976d2011-05-26 16:25:33 -07001894 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001895 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001896
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001897 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001898
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001899 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1900 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001901 }
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001902
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001903 atomic_set(&memcg->numainfo_events, 0);
1904 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001905}
1906
1907/*
 1908 * Select a node to start reclaim from. Because all we need is to reduce
 1909 * the usage counter, starting from anywhere is OK. Reclaiming from the
 1910 * current node has both pros and cons.
 1911 *
 1912 * Freeing memory from the current node means freeing memory from a node
 1913 * which we will use or have used, so it may hurt that node's LRU. And if
 1914 * several threads hit their limits, they will contend on one node. But
 1915 * freeing from a remote node costs more for reclaim because of memory latency.
 1916 *
 1917 * For now, we use round-robin. A better algorithm is welcome.
1918 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001919int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001920{
1921 int node;
1922
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001923 mem_cgroup_may_update_nodemask(memcg);
1924 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001925
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001926 node = next_node(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001927 if (node == MAX_NUMNODES)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001928 node = first_node(memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001929 /*
 1930	 * We call this when we hit the limit, not when pages are added to the LRU.
 1931	 * No LRU may hold pages because all pages are UNEVICTABLE, or the
 1932	 * memcg is too small and no pages are on the LRU. In that case,
 1933	 * we use the current node.
1934 */
1935 if (unlikely(node == MAX_NUMNODES))
1936 node = numa_node_id();
1937
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001938 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001939 return node;
1940}
1941
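/*
 * Example (simplified sketch of the reclaim-side caller): target reclaim
 * picks a starting node from the memcg's nodemask and builds its zonelist
 * from there.  The scan_control setup of the real code is omitted.
 *
 *	int nid = mem_cgroup_select_victim_node(memcg);
 *	struct zonelist *zonelist = NODE_DATA(nid)->node_zonelists;
 *
 *	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 */
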
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07001942/*
 1943 * Check whether any node contains reclaimable pages.
 1944 * For a quick scan, we make use of scan_nodes, which allows us to skip
 1945 * unused nodes. But scan_nodes is lazily updated and may not contain
 1946 * enough new information, so we need to double check.
1947 */
1948static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1949{
1950 int nid;
1951
1952 /*
 1953	 * Quick check...making use of scan_nodes.
1954 * We can skip unused nodes.
1955 */
1956 if (!nodes_empty(memcg->scan_nodes)) {
1957 for (nid = first_node(memcg->scan_nodes);
1958 nid < MAX_NUMNODES;
1959 nid = next_node(nid, memcg->scan_nodes)) {
1960
1961 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1962 return true;
1963 }
1964 }
1965 /*
1966 * Check rest of nodes.
1967 */
1968 for_each_node_state(nid, N_MEMORY) {
1969 if (node_isset(nid, memcg->scan_nodes))
1970 continue;
1971 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1972 return true;
1973 }
1974 return false;
1975}
1976
Ying Han889976d2011-05-26 16:25:33 -07001977#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001978int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001979{
1980 return 0;
1981}
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001982
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07001983static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1984{
1985 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1986}
Ying Han889976d2011-05-26 16:25:33 -07001987#endif
1988
Andrew Morton0608f432013-09-24 15:27:41 -07001989static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1990 struct zone *zone,
1991 gfp_t gfp_mask,
1992 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001993{
Andrew Morton0608f432013-09-24 15:27:41 -07001994 struct mem_cgroup *victim = NULL;
1995 int total = 0;
1996 int loop = 0;
1997 unsigned long excess;
1998 unsigned long nr_scanned;
1999 struct mem_cgroup_reclaim_cookie reclaim = {
2000 .zone = zone,
2001 .priority = 0,
2002 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07002003
Andrew Morton0608f432013-09-24 15:27:41 -07002004 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
Balbir Singh6d61ef42009-01-07 18:08:06 -08002005
Andrew Morton0608f432013-09-24 15:27:41 -07002006 while (1) {
2007 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2008 if (!victim) {
2009 loop++;
2010 if (loop >= 2) {
2011 /*
2012 * If we have not been able to reclaim
 2013				 * anything, it might be because there are
2014 * no reclaimable pages under this hierarchy
2015 */
2016 if (!total)
2017 break;
2018 /*
2019 * We want to do more targeted reclaim.
 2020				 * excess >> 2 is not so large that we
 2021				 * reclaim too much, nor so small that we keep
 2022				 * coming back to reclaim from this cgroup
2023 */
2024 if (total >= (excess >> 2) ||
2025 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2026 break;
2027 }
2028 continue;
2029 }
2030 if (!mem_cgroup_reclaimable(victim, false))
2031 continue;
2032 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2033 zone, &nr_scanned);
2034 *total_scanned += nr_scanned;
2035 if (!res_counter_soft_limit_excess(&root_memcg->res))
2036 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08002037 }
Andrew Morton0608f432013-09-24 15:27:41 -07002038 mem_cgroup_iter_break(root_memcg, victim);
2039 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08002040}
2041
Johannes Weiner0056f4e2013-10-31 16:34:14 -07002042#ifdef CONFIG_LOCKDEP
2043static struct lockdep_map memcg_oom_lock_dep_map = {
2044 .name = "memcg_oom_lock",
2045};
2046#endif
2047
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002048static DEFINE_SPINLOCK(memcg_oom_lock);
2049
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002050/*
 2051 * Check whether the OOM killer is already running under our hierarchy.
 2052 * If someone is running it, return false.
2053 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002054static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002055{
Michal Hocko79dfdac2011-07-26 16:08:23 -07002056 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002057
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002058 spin_lock(&memcg_oom_lock);
2059
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002060 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07002061 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07002062 /*
2063 * this subtree of our hierarchy is already locked
 2064			 * so we cannot grant the lock.
2065 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07002066 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002067 mem_cgroup_iter_break(memcg, iter);
2068 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07002069 } else
2070 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07002071 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002072
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002073 if (failed) {
2074 /*
 2075		 * OK, we failed to lock the whole subtree so we have
 2076		 * to clean up what we set up, up to the failing node
2077 */
2078 for_each_mem_cgroup_tree(iter, memcg) {
2079 if (iter == failed) {
2080 mem_cgroup_iter_break(memcg, iter);
2081 break;
2082 }
2083 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07002084 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07002085 } else
2086 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002087
2088 spin_unlock(&memcg_oom_lock);
2089
2090 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002091}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07002092
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002093static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07002094{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07002095 struct mem_cgroup *iter;
2096
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002097 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07002098 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002099 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07002100 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002101 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07002102}
2103
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002104static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07002105{
2106 struct mem_cgroup *iter;
2107
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002108 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07002109 atomic_inc(&iter->under_oom);
2110}
2111
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002112static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07002113{
2114 struct mem_cgroup *iter;
2115
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002116 /*
2117 * When a new child is created while the hierarchy is under oom,
2118 * mem_cgroup_oom_lock() may not be called. We have to use
2119 * atomic_add_unless() here.
2120 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002121 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07002122 atomic_add_unless(&iter->under_oom, -1, 0);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07002123}
2124
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002125static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2126
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002127struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07002128 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002129 wait_queue_t wait;
2130};
2131
2132static int memcg_oom_wake_function(wait_queue_t *wait,
2133 unsigned mode, int sync, void *arg)
2134{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07002135 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2136 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002137 struct oom_wait_info *oom_wait_info;
2138
2139 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07002140 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002141
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002142 /*
Hugh Dickinsd79154b2012-03-21 16:34:18 -07002143	 * Both oom_wait_info->memcg and wake_memcg are stable under us,
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002144	 * so we can use css_is_ancestor without taking care of RCU.
2145 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002146 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2147 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002148 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002149 return autoremove_wake_function(wait, mode, sync, arg);
2150}
2151
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002152static void memcg_wakeup_oom(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002153{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002154 atomic_inc(&memcg->oom_wakeups);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002155 /* for filtering, pass "memcg" as argument. */
2156 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07002157}
2158
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002159static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002160{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002161 if (memcg && atomic_read(&memcg->under_oom))
2162 memcg_wakeup_oom(memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002163}
2164
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002165static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002166{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002167 if (!current->memcg_oom.may_oom)
2168 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002169 /*
Johannes Weiner49426422013-10-16 13:46:59 -07002170 * We are in the middle of the charge context here, so we
2171 * don't want to block when potentially sitting on a callstack
2172 * that holds all kinds of filesystem and mm locks.
2173 *
2174 * Also, the caller may handle a failed allocation gracefully
2175 * (like optional page cache readahead) and so an OOM killer
2176 * invocation might not even be necessary.
2177 *
2178 * That's why we don't do anything here except remember the
2179 * OOM context and then deal with it at the end of the page
2180 * fault when the stack is unwound, the locks are released,
2181 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002182 */
Johannes Weiner49426422013-10-16 13:46:59 -07002183 css_get(&memcg->css);
2184 current->memcg_oom.memcg = memcg;
2185 current->memcg_oom.gfp_mask = mask;
2186 current->memcg_oom.order = order;
2187}
2188
2189/**
2190 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2191 * @handle: actually kill/wait or just clean up the OOM state
2192 *
2193 * This has to be called at the end of a page fault if the memcg OOM
2194 * handler was enabled.
2195 *
2196 * Memcg supports userspace OOM handling where failed allocations must
2197 * sleep on a waitqueue until the userspace task resolves the
2198 * situation. Sleeping directly in the charge context with all kinds
2199 * of locks held is not a good idea, instead we remember an OOM state
2200 * in the task and mem_cgroup_oom_synchronize() has to be called at
2201 * the end of the page fault to complete the OOM handling.
2202 *
2203 * Returns %true if an ongoing memcg OOM situation was detected and
2204 * completed, %false otherwise.
2205 */
2206bool mem_cgroup_oom_synchronize(bool handle)
2207{
2208 struct mem_cgroup *memcg = current->memcg_oom.memcg;
2209 struct oom_wait_info owait;
2210 bool locked;
2211
2212 /* OOM is global, do not handle */
2213 if (!memcg)
2214 return false;
2215
2216 if (!handle)
2217 goto cleanup;
2218
2219 owait.memcg = memcg;
2220 owait.wait.flags = 0;
2221 owait.wait.func = memcg_oom_wake_function;
2222 owait.wait.private = current;
2223 INIT_LIST_HEAD(&owait.wait.task_list);
2224
2225 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002226 mem_cgroup_mark_under_oom(memcg);
2227
2228 locked = mem_cgroup_oom_trylock(memcg);
2229
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002230 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002231 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002232
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002233 if (locked && !memcg->oom_kill_disable) {
2234 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07002235 finish_wait(&memcg_oom_waitq, &owait.wait);
2236 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2237 current->memcg_oom.order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002238 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002239 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07002240 mem_cgroup_unmark_under_oom(memcg);
2241 finish_wait(&memcg_oom_waitq, &owait.wait);
2242 }
2243
2244 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07002245 mem_cgroup_oom_unlock(memcg);
2246 /*
2247 * There is no guarantee that an OOM-lock contender
2248 * sees the wakeups triggered by the OOM kill
 2249		 * uncharges. Wake any sleepers explicitly.
2250 */
2251 memcg_oom_recover(memcg);
2252 }
Johannes Weiner49426422013-10-16 13:46:59 -07002253cleanup:
2254 current->memcg_oom.memcg = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07002255 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002256 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07002257}
2258
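/*
 * Example (simplified sketch of the page fault side): the fault handler
 * ends up in code roughly equivalent to the following, where the
 * enable/disable helpers toggle current->memcg_oom.may_oom and the
 * synchronize call performs the deferred OOM handling described above.
 * Details of the real handle_mm_fault() may differ.
 *
 *	if (flags & FAULT_FLAG_USER)
 *		mem_cgroup_oom_enable();
 *
 *	ret = __handle_mm_fault(mm, vma, address, flags);
 *
 *	if (flags & FAULT_FLAG_USER) {
 *		mem_cgroup_oom_disable();
 *		if (unlikely(ret & VM_FAULT_OOM))
 *			mem_cgroup_oom_synchronize(true);
 *	}
 */
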
Balbir Singhd69b0422009-06-17 16:26:34 -07002259/*
Qiang Huangb5ffc852014-06-04 16:08:22 -07002260 * Used to update mapped file or writeback or other statistics.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002261 *
2262 * Notes: Race condition
2263 *
Qiang Huangb5ffc852014-06-04 16:08:22 -07002264 * We usually use lock_page_cgroup() for accessing page_cgroup member but
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002265 * it tends to be costly. But under some conditions, we don't need
2266 * to do so _always_.
2267 *
2268 * Considering "charge", lock_page_cgroup() is not required because all
2269 * file-stat operations happen after a page is attached to radix-tree. There
2270 * are no race with "charge".
2271 *
2272 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2273 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
2274 * if there are race with "uncharge". Statistics itself is properly handled
2275 * by flags.
2276 *
 2277 * Considering "move", this is the only case where we see a race. To make the
Qiang Huangb5ffc852014-06-04 16:08:22 -07002278 * race window small, we check memcg->moving_account to detect whether a race
 2279 * is possible. If it is, we take a lock.
Balbir Singhd69b0422009-06-17 16:26:34 -07002280 */
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002281
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002282void __mem_cgroup_begin_update_page_stat(struct page *page,
2283 bool *locked, unsigned long *flags)
2284{
2285 struct mem_cgroup *memcg;
2286 struct page_cgroup *pc;
2287
2288 pc = lookup_page_cgroup(page);
2289again:
2290 memcg = pc->mem_cgroup;
2291 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2292 return;
2293 /*
2294 * If this memory cgroup is not under account moving, we don't
Wanpeng Lida92c472012-07-31 16:43:26 -07002295 * need to take move_lock_mem_cgroup(). Because we already hold
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002296 * rcu_read_lock(), any calls to move_account will be delayed until
Qiang Huangbdcbb652014-06-04 16:08:21 -07002297 * rcu_read_unlock().
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002298 */
Qiang Huangbdcbb652014-06-04 16:08:21 -07002299 VM_BUG_ON(!rcu_read_lock_held());
2300 if (atomic_read(&memcg->moving_account) <= 0)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002301 return;
2302
2303 move_lock_mem_cgroup(memcg, flags);
2304 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2305 move_unlock_mem_cgroup(memcg, flags);
2306 goto again;
2307 }
2308 *locked = true;
2309}
2310
2311void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2312{
2313 struct page_cgroup *pc = lookup_page_cgroup(page);
2314
2315 /*
2316 * It's guaranteed that pc->mem_cgroup never changes while
 2317	 * the lock is held, because any routine that modifies pc->mem_cgroup
Wanpeng Lida92c472012-07-31 16:43:26 -07002318 * should take move_lock_mem_cgroup().
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002319 */
2320 move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2321}
2322
Greg Thelen2a7106f2011-01-13 15:47:37 -08002323void mem_cgroup_update_page_stat(struct page *page,
Sha Zhengju68b48762013-09-12 15:13:50 -07002324 enum mem_cgroup_stat_index idx, int val)
Balbir Singhd69b0422009-06-17 16:26:34 -07002325{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002326 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002327 struct page_cgroup *pc = lookup_page_cgroup(page);
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08002328 unsigned long uninitialized_var(flags);
Balbir Singhd69b0422009-06-17 16:26:34 -07002329
Johannes Weinercfa44942012-01-12 17:18:38 -08002330 if (mem_cgroup_disabled())
Balbir Singhd69b0422009-06-17 16:26:34 -07002331 return;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002332
Sha Zhengju658b72c2013-09-12 15:13:52 -07002333 VM_BUG_ON(!rcu_read_lock_held());
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002334 memcg = pc->mem_cgroup;
2335 if (unlikely(!memcg || !PageCgroupUsed(pc)))
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002336 return;
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002337
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002338 this_cpu_add(memcg->stat->count[idx], val);
Balbir Singhd69b0422009-06-17 16:26:34 -07002339}
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002340
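/*
 * Illustrative caller pattern for the page-stat hooks above (a sketch
 * modeled on the file-rmap accounting path; the real call sites live
 * outside this file, behind the mem_cgroup_begin_update_page_stat()
 * wrappers in include/linux/memcontrol.h, and may differ in detail):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount)) {
 *		__inc_zone_page_state(page, NR_FILE_MAPPED);
 *		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	}
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 *
 * The begin/end pair holds rcu_read_lock() across the update and takes
 * the per-memcg move_lock only while account moving is in progress, so
 * the statistics update cannot race with mem_cgroup_move_account().
 */
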
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002341/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002342 * size of first charge trial. "32" comes from vmscan.c's magic value.
2343 * TODO: it may be necessary to use bigger numbers on big-iron systems.
2344 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002345#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002346struct memcg_stock_pcp {
2347	struct mem_cgroup *cached; /* this is never the root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002348 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002349 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002350 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07002351#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002352};
2353static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002354static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002355
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002356/**
2357 * consume_stock: Try to consume stocked charge on this cpu.
2358 * @memcg: memcg to consume from.
2359 * @nr_pages: how many pages to charge.
2360 *
2361 * The charges will only happen if @memcg matches the current cpu's memcg
2362 * stock, and at least @nr_pages are available in that stock. Failure to
2363 * service an allocation will refill the stock.
2364 *
2365 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002366 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002367static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002368{
2369 struct memcg_stock_pcp *stock;
2370 bool ret = true;
2371
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002372 if (nr_pages > CHARGE_BATCH)
2373 return false;
2374
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002375 stock = &get_cpu_var(memcg_stock);
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002376 if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2377 stock->nr_pages -= nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002378 else /* need to call res_counter_charge */
2379 ret = false;
2380 put_cpu_var(memcg_stock);
2381 return ret;
2382}
2383
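/*
 * Worked example of the batching above, with CHARGE_BATCH == 32: a
 * one-page charge that misses the local stock makes the slow path charge
 * a full batch of 32 pages to the res_counter and then put the unused 31
 * pages into this cpu's stock via refill_stock(). The next 31 one-page
 * charges for the same memcg on this cpu are then served entirely by
 * consume_stock(), without touching the res_counter at all.
 */
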
2384/*
2385 * Returns stocks cached in percpu to the res_counter and resets cached information.
2386 */
2387static void drain_stock(struct memcg_stock_pcp *stock)
2388{
2389 struct mem_cgroup *old = stock->cached;
2390
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002391 if (stock->nr_pages) {
2392 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2393
2394 res_counter_uncharge(&old->res, bytes);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002395 if (do_swap_account)
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002396 res_counter_uncharge(&old->memsw, bytes);
2397 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002398 }
2399 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002400}
2401
2402/*
2403 * This must be called with preemption disabled, or by a thread which is
2404 * pinned to the local cpu.
2405 */
2406static void drain_local_stock(struct work_struct *dummy)
2407{
Christoph Lameter7c8e0182014-06-04 16:07:56 -07002408 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002409 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002410 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002411}
2412
Michal Hockoe4777492013-02-22 16:35:40 -08002413static void __init memcg_stock_init(void)
2414{
2415 int cpu;
2416
2417 for_each_possible_cpu(cpu) {
2418 struct memcg_stock_pcp *stock =
2419 &per_cpu(memcg_stock, cpu);
2420 INIT_WORK(&stock->work, drain_local_stock);
2421 }
2422}
2423
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002424/*
2425 * Cache charges(val) which is from res_counter, to local per_cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01002426 * This will be consumed by consume_stock() function, later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002427 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002428static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002429{
2430 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2431
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002432 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002433 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002434 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002435 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002436 stock->nr_pages += nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002437 put_cpu_var(memcg_stock);
2438}
2439
2440/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002441 * Drains all per-CPU charge caches for the given root_memcg and the subtree
Michal Hockod38144b2011-07-26 16:08:28 -07002442 * of the hierarchy under it. The sync flag says whether we should block
2443 * until the work is done.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002444 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002445static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002446{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002447 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07002448
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002449 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002450 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07002451 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002452 for_each_online_cpu(cpu) {
2453 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002454 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002455
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002456 memcg = stock->cached;
2457 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002458 continue;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002459 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07002460 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07002461 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2462 if (cpu == curcpu)
2463 drain_local_stock(&stock->work);
2464 else
2465 schedule_work_on(cpu, &stock->work);
2466 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002467 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07002468 put_cpu();
Michal Hockod38144b2011-07-26 16:08:28 -07002469
2470 if (!sync)
2471 goto out;
2472
2473 for_each_online_cpu(cpu) {
2474 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002475 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
Michal Hockod38144b2011-07-26 16:08:28 -07002476 flush_work(&stock->work);
2477 }
2478out:
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002479 put_online_cpus();
Michal Hockod38144b2011-07-26 16:08:28 -07002480}
2481
2482/*
2483 * Tries to drain stocked charges on other cpus. This function is asynchronous
2484 * and just queues a work item per cpu to drain locally on each cpu. The caller
2485 * can expect some charges to come back to the res_counter later, but cannot
2486 * wait for that.
2487 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002488static void drain_all_stock_async(struct mem_cgroup *root_memcg)
Michal Hockod38144b2011-07-26 16:08:28 -07002489{
Michal Hocko9f50fad2011-08-09 11:56:26 +02002490 /*
2491 * If someone calls draining, avoid adding more kworker runs.
2492 */
2493 if (!mutex_trylock(&percpu_charge_mutex))
2494 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002495 drain_all_stock(root_memcg, false);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002496 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002497}
2498
2499/* This is a synchronous drain interface. */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002500static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002501{
2502 /* called when force_empty is called */
Michal Hocko9f50fad2011-08-09 11:56:26 +02002503 mutex_lock(&percpu_charge_mutex);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002504 drain_all_stock(root_memcg, true);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002505 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002506}
2507
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002508/*
2509 * This function drains the percpu counter values from a DEAD cpu and
2510 * moves them to the local cpu. Note that this function can be preempted.
2511 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002512static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002513{
2514 int i;
2515
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002516 spin_lock(&memcg->pcp_counter_lock);
Johannes Weiner61046212012-05-29 15:07:05 -07002517 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002518 long x = per_cpu(memcg->stat->count[i], cpu);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002519
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002520 per_cpu(memcg->stat->count[i], cpu) = 0;
2521 memcg->nocpu_base.count[i] += x;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002522 }
Johannes Weinere9f89742011-03-23 16:42:37 -07002523 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002524 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
Johannes Weinere9f89742011-03-23 16:42:37 -07002525
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002526 per_cpu(memcg->stat->events[i], cpu) = 0;
2527 memcg->nocpu_base.events[i] += x;
Johannes Weinere9f89742011-03-23 16:42:37 -07002528 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002529 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002530}
2531
Paul Gortmaker0db06282013-06-19 14:53:51 -04002532static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002533 unsigned long action,
2534 void *hcpu)
2535{
2536 int cpu = (unsigned long)hcpu;
2537 struct memcg_stock_pcp *stock;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002538 struct mem_cgroup *iter;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002539
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07002540 if (action == CPU_ONLINE)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002541 return NOTIFY_OK;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002542
Kirill A. Shutemovd8330492012-04-12 12:49:11 -07002543 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002544 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002545
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002546 for_each_mem_cgroup(iter)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002547 mem_cgroup_drain_pcp_counter(iter, cpu);
2548
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002549 stock = &per_cpu(memcg_stock, cpu);
2550 drain_stock(stock);
2551 return NOTIFY_OK;
2552}
2553
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002554/**
2555 * mem_cgroup_try_charge - try charging a memcg
2556 * @memcg: memcg to charge
2557 * @nr_pages: number of pages to charge
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08002558 *
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002559 * Returns 0 if @memcg was charged successfully, -EINTR if the charge
2560 * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002561 */
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002562static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
2563 gfp_t gfp_mask,
Michal Hocko0029e192014-08-06 16:05:53 -07002564 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002565{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002566 unsigned int batch = max(CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07002567 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002568 struct mem_cgroup *mem_over_limit;
2569 struct res_counter *fail_res;
2570 unsigned long nr_reclaimed;
2571 unsigned long flags = 0;
2572 unsigned long long size;
Johannes Weiner05b84302014-08-06 16:05:59 -07002573 int ret = 0;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002574
Johannes Weiner6539cc02014-08-06 16:05:42 -07002575retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07002576 if (consume_stock(memcg, nr_pages))
2577 goto done;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002578
Johannes Weiner6539cc02014-08-06 16:05:42 -07002579 size = batch * PAGE_SIZE;
2580 if (!res_counter_charge(&memcg->res, size, &fail_res)) {
2581 if (!do_swap_account)
2582 goto done_restock;
2583 if (!res_counter_charge(&memcg->memsw, size, &fail_res))
2584 goto done_restock;
2585 res_counter_uncharge(&memcg->res, size);
2586 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2587 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2588 } else
2589 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002590
Johannes Weiner6539cc02014-08-06 16:05:42 -07002591 if (batch > nr_pages) {
2592 batch = nr_pages;
2593 goto retry;
2594 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002595
Johannes Weiner06b078f2014-08-06 16:05:44 -07002596 /*
2597 * Unlike in global OOM situations, memcg is not in a physical
2598 * memory shortage. Allow dying and OOM-killed tasks to
2599 * bypass the last charges so that they can exit quickly and
2600 * free their memory.
2601 */
2602 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2603 fatal_signal_pending(current) ||
2604 current->flags & PF_EXITING))
2605 goto bypass;
2606
2607 if (unlikely(task_in_memcg_oom(current)))
2608 goto nomem;
2609
Johannes Weiner6539cc02014-08-06 16:05:42 -07002610 if (!(gfp_mask & __GFP_WAIT))
2611 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002612
Johannes Weiner6539cc02014-08-06 16:05:42 -07002613 nr_reclaimed = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2614
2615 if (mem_cgroup_margin(mem_over_limit) >= batch)
2616 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07002617
2618 if (gfp_mask & __GFP_NORETRY)
2619 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002620 /*
2621 * Even though the limit is exceeded at this point, reclaim
2622 * may have been able to free some pages. Retry the charge
2623 * before killing the task.
2624 *
2625 * Only for regular pages, though: huge pages are rather
2626 * unlikely to succeed so close to the limit, and we fall back
2627 * to regular pages anyway in case of failure.
2628 */
2629 if (nr_reclaimed && batch <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2630 goto retry;
2631 /*
2632 * At task move, charge accounts can be doubly counted. So, it's
2633 * better to wait until the end of task_move if something is going on.
2634 */
2635 if (mem_cgroup_wait_acct_move(mem_over_limit))
2636 goto retry;
2637
Johannes Weiner9b130612014-08-06 16:05:51 -07002638 if (nr_retries--)
2639 goto retry;
2640
Johannes Weiner06b078f2014-08-06 16:05:44 -07002641 if (gfp_mask & __GFP_NOFAIL)
2642 goto bypass;
2643
Johannes Weiner6539cc02014-08-06 16:05:42 -07002644 if (fatal_signal_pending(current))
2645 goto bypass;
2646
Johannes Weiner6539cc02014-08-06 16:05:42 -07002647 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(batch));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002648nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002649 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002650 return -ENOMEM;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002651bypass:
Johannes Weiner05b84302014-08-06 16:05:59 -07002652 memcg = root_mem_cgroup;
2653 ret = -EINTR;
2654 goto retry;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002655
2656done_restock:
2657 if (batch > nr_pages)
2658 refill_stock(memcg, batch - nr_pages);
2659done:
Johannes Weiner05b84302014-08-06 16:05:59 -07002660 return ret;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002661}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002662
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002663/**
2664 * mem_cgroup_try_charge_mm - try charging a mm
2665 * @mm: mm_struct to charge
2666 * @gfp_mask: reclaim mode
2667 * @nr_pages: number of pages to charge
2668 *
2669 * Returns the charged mem_cgroup associated with the given mm_struct or
2670 * NULL if the charge failed.
2671 */
2672static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
2673 gfp_t gfp_mask,
Michal Hocko0029e192014-08-06 16:05:53 -07002674 unsigned int nr_pages)
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002675
2676{
2677 struct mem_cgroup *memcg;
2678 int ret;
2679
2680 memcg = get_mem_cgroup_from_mm(mm);
Michal Hocko0029e192014-08-06 16:05:53 -07002681 ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages);
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002682 css_put(&memcg->css);
2683 if (ret == -EINTR)
2684 memcg = root_mem_cgroup;
2685 else if (ret)
2686 memcg = NULL;
2687
2688 return memcg;
2689}
2690
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002691/*
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002692 * Sometimes we have to undo a charge we got by try_charge().
2693 * This function is for that: it does the uncharge and puts the css refcount
2694 * gotten by try_charge().
2695 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002696static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
Johannes Weinere7018b8d2011-03-23 16:42:33 -07002697 unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002698{
Johannes Weiner05b84302014-08-06 16:05:59 -07002699 unsigned long bytes = nr_pages * PAGE_SIZE;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002700
Johannes Weiner05b84302014-08-06 16:05:59 -07002701 res_counter_uncharge(&memcg->res, bytes);
2702 if (do_swap_account)
2703 res_counter_uncharge(&memcg->memsw, bytes);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002704}
2705
2706/*
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002707 * Cancel charges in this cgroup; this doesn't propagate to the parent cgroup.
2708 * This is useful when moving usage to parent cgroup.
2709 */
2710static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2711 unsigned int nr_pages)
2712{
2713 unsigned long bytes = nr_pages * PAGE_SIZE;
2714
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002715 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2716 if (do_swap_account)
2717 res_counter_uncharge_until(&memcg->memsw,
2718 memcg->memsw.parent, bytes);
2719}
2720
2721/*
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002722 * A helper function to get a mem_cgroup from its ID. Must be called under
Tejun Heoec903c02014-05-13 12:11:01 -04002723 * rcu_read_lock(). The caller is responsible for calling
2724 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
2725 * refcnt from swap can be called against removed memcg.)
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002726 */
2727static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2728{
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002729 /* ID 0 is unused ID */
2730 if (!id)
2731 return NULL;
Li Zefan34c00c32013-09-23 16:56:01 +08002732 return mem_cgroup_from_id(id);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002733}
2734
Wu Fengguange42d9d52009-12-16 12:19:59 +01002735struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002736{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002737 struct mem_cgroup *memcg = NULL;
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002738 struct page_cgroup *pc;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002739 unsigned short id;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002740 swp_entry_t ent;
2741
Sasha Levin309381fea2014-01-23 15:52:54 -08002742 VM_BUG_ON_PAGE(!PageLocked(page), page);
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002743
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002744 pc = lookup_page_cgroup(page);
Daisuke Nishimurac0bd3f632009-04-30 15:08:11 -07002745 lock_page_cgroup(pc);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002746 if (PageCgroupUsed(pc)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002747 memcg = pc->mem_cgroup;
Tejun Heoec903c02014-05-13 12:11:01 -04002748 if (memcg && !css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002749 memcg = NULL;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002750 } else if (PageSwapCache(page)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002751 ent.val = page_private(page);
Bob Liu9fb4b7c2012-01-12 17:18:48 -08002752 id = lookup_swap_cgroup_id(ent);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002753 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002754 memcg = mem_cgroup_lookup(id);
Tejun Heoec903c02014-05-13 12:11:01 -04002755 if (memcg && !css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002756 memcg = NULL;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002757 rcu_read_unlock();
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002758 }
Daisuke Nishimurac0bd3f632009-04-30 15:08:11 -07002759 unlock_page_cgroup(pc);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002760 return memcg;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002761}
2762
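/*
 * Sketch of a hypothetical caller of the lookup above; the reference
 * obtained through css_tryget_online() must be dropped once the memcg is
 * no longer needed:
 *
 *	struct mem_cgroup *memcg = try_get_mem_cgroup_from_page(page);
 *
 *	if (memcg) {
 *		... use memcg ...
 *		css_put(&memcg->css);
 *	}
 */
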
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002763static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
Johannes Weiner5564e882011-03-23 16:42:29 -07002764 struct page *page,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002765 unsigned int nr_pages,
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002766 enum charge_type ctype,
2767 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002768{
Johannes Weinerce587e62012-04-24 20:22:33 +02002769 struct page_cgroup *pc = lookup_page_cgroup(page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002770 struct zone *uninitialized_var(zone);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07002771 struct lruvec *lruvec;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002772 bool was_on_lru = false;
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07002773 bool anon;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002774
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002775 lock_page_cgroup(pc);
Sasha Levin309381fea2014-01-23 15:52:54 -08002776 VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002777 /*
2778 * we don't need the page_cgroup lock for tail pages, because they are not
2779 * accessed by any other context at this point.
2780 */
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002781
2782 /*
2783 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2784 * may already be on some other mem_cgroup's LRU. Take care of it.
2785 */
2786 if (lrucare) {
2787 zone = page_zone(page);
2788 spin_lock_irq(&zone->lru_lock);
2789 if (PageLRU(page)) {
Hugh Dickinsfa9add62012-05-29 15:07:09 -07002790 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002791 ClearPageLRU(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07002792 del_page_from_lru_list(page, lruvec, page_lru(page));
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002793 was_on_lru = true;
2794 }
2795 }
2796
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002797 pc->mem_cgroup = memcg;
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07002798 SetPageCgroupUsed(pc);
Hugh Dickins3be912772008-02-07 00:14:19 -08002799
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002800 if (lrucare) {
2801 if (was_on_lru) {
Hugh Dickinsfa9add62012-05-29 15:07:09 -07002802 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
Sasha Levin309381fea2014-01-23 15:52:54 -08002803 VM_BUG_ON_PAGE(PageLRU(page), page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002804 SetPageLRU(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07002805 add_page_to_lru_list(page, lruvec, page_lru(page));
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002806 }
2807 spin_unlock_irq(&zone->lru_lock);
2808 }
2809
Kamezawa Hiroyuki41326c12012-07-31 16:41:40 -07002810 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07002811 anon = true;
2812 else
2813 anon = false;
2814
David Rientjesb070e652013-05-07 16:18:09 -07002815 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002816 unlock_page_cgroup(pc);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002817
KAMEZAWA Hiroyuki430e48632010-03-10 15:22:30 -08002818 /*
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07002819 * "charge_statistics" updated the event counter. Then, check it.
2820 * Insert the ancestor (and the ancestor's ancestors) into the softlimit RB-tree
2821 * if they exceed the softlimit.
KAMEZAWA Hiroyuki430e48632010-03-10 15:22:30 -08002822 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002823 memcg_check_events(memcg, page);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002824}
2825
Glauber Costa7cf27982012-12-18 14:22:55 -08002826static DEFINE_MUTEX(set_limit_mutex);
2827
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002828#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydovbd673142014-06-04 16:07:40 -07002829/*
2830 * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
2831 * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
2832 */
2833static DEFINE_MUTEX(memcg_slab_mutex);
2834
Vladimir Davydovd6441632014-01-23 15:53:09 -08002835static DEFINE_MUTEX(activate_kmem_mutex);
2836
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002837static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2838{
2839 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
Vladimir Davydov6de64be2014-01-23 15:53:08 -08002840 memcg_kmem_is_active(memcg);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002841}
2842
Glauber Costa1f458cb2012-12-18 14:22:50 -08002843/*
2844 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2845 * in the memcg_cache_params struct.
2846 */
2847static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2848{
2849 struct kmem_cache *cachep;
2850
2851 VM_BUG_ON(p->is_root_cache);
2852 cachep = p->root_cache;
Qiang Huang7a67d7a2013-11-12 15:08:24 -08002853 return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
Glauber Costa1f458cb2012-12-18 14:22:50 -08002854}
2855
Glauber Costa749c5412012-12-18 14:23:01 -08002856#ifdef CONFIG_SLABINFO
Tejun Heo2da8ca82013-12-05 12:28:04 -05002857static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
Glauber Costa749c5412012-12-18 14:23:01 -08002858{
Tejun Heo2da8ca82013-12-05 12:28:04 -05002859 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Glauber Costa749c5412012-12-18 14:23:01 -08002860 struct memcg_cache_params *params;
2861
2862 if (!memcg_can_account_kmem(memcg))
2863 return -EIO;
2864
2865 print_slabinfo_header(m);
2866
Vladimir Davydovbd673142014-06-04 16:07:40 -07002867 mutex_lock(&memcg_slab_mutex);
Glauber Costa749c5412012-12-18 14:23:01 -08002868 list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2869 cache_show(memcg_params_to_cache(params), m);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002870 mutex_unlock(&memcg_slab_mutex);
Glauber Costa749c5412012-12-18 14:23:01 -08002871
2872 return 0;
2873}
2874#endif
2875
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002876static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002877{
2878 struct res_counter *fail_res;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002879 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002880
2881 ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2882 if (ret)
2883 return ret;
2884
Michal Hocko0029e192014-08-06 16:05:53 -07002885 ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002886 if (ret == -EINTR) {
2887 /*
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002888 * mem_cgroup_try_charge() chose to bypass to root due to
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002889 * OOM kill or fatal signal. Since our only options are to
2890 * either fail the allocation or charge it to this cgroup, do
2891 * it as a temporary condition. But we can't fail. From a
2892 * kmem/slab perspective, the cache has already been selected,
2893 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2894 * our minds.
2895 *
2896 * This condition will only trigger if the task entered
2897 * memcg_charge_kmem in a sane state, but was OOM-killed during
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002898 * mem_cgroup_try_charge() above. Tasks that were already
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002899 * dying when the allocation triggers should have been already
2900 * directed to the root cgroup in memcontrol.h
2901 */
2902 res_counter_charge_nofail(&memcg->res, size, &fail_res);
2903 if (do_swap_account)
2904 res_counter_charge_nofail(&memcg->memsw, size,
2905 &fail_res);
2906 ret = 0;
2907 } else if (ret)
2908 res_counter_uncharge(&memcg->kmem, size);
2909
2910 return ret;
2911}
2912
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002913static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002914{
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002915 res_counter_uncharge(&memcg->res, size);
2916 if (do_swap_account)
2917 res_counter_uncharge(&memcg->memsw, size);
Glauber Costa7de37682012-12-18 14:22:07 -08002918
2919 /* Not down to 0 */
2920 if (res_counter_uncharge(&memcg->kmem, size))
2921 return;
2922
Li Zefan10d5ebf2013-07-08 16:00:33 -07002923 /*
2924 * Releases a reference taken in kmem_cgroup_css_offline in case
2925 * this last uncharge is racing with the offlining code or it is
2926 * outliving the memcg existence.
2927 *
2928 * The memory barrier imposed by test&clear is paired with the
2929 * explicit one in memcg_kmem_mark_dead().
2930 */
Glauber Costa7de37682012-12-18 14:22:07 -08002931 if (memcg_kmem_test_and_clear_dead(memcg))
Li Zefan10d5ebf2013-07-08 16:00:33 -07002932 css_put(&memcg->css);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002933}
2934
Glauber Costa2633d7a2012-12-18 14:22:34 -08002935/*
2936 * helper for accessing a memcg's index. It will be used as an index in the
2937 * child cache array in kmem_cache, and also to derive its name. This function
2938 * will return -1 when this is not a kmem-limited memcg.
2939 */
2940int memcg_cache_id(struct mem_cgroup *memcg)
2941{
2942 return memcg ? memcg->kmemcg_id : -1;
2943}
2944
Glauber Costa55007d82012-12-18 14:22:38 -08002945static size_t memcg_caches_array_size(int num_groups)
2946{
2947 ssize_t size;
2948 if (num_groups <= 0)
2949 return 0;
2950
2951 size = 2 * num_groups;
2952 if (size < MEMCG_CACHES_MIN_SIZE)
2953 size = MEMCG_CACHES_MIN_SIZE;
2954 else if (size > MEMCG_CACHES_MAX_SIZE)
2955 size = MEMCG_CACHES_MAX_SIZE;
2956
2957 return size;
2958}
2959
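/*
 * For example, num_groups == 100 yields room for 200 caches, while very
 * small or very large values are clamped to MEMCG_CACHES_MIN_SIZE and
 * MEMCG_CACHES_MAX_SIZE respectively, so the array always has some slack
 * over the current number of groups.
 */
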
2960/*
2961 * We should update the current array size iff all cache updates succeed. This
2962 * can only be done from the slab side. The slab mutex needs to be held when
2963 * calling this.
2964 */
2965void memcg_update_array_size(int num)
2966{
2967 if (num > memcg_limited_groups_array_size)
2968 memcg_limited_groups_array_size = memcg_caches_array_size(num);
2969}
2970
2971int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
2972{
2973 struct memcg_cache_params *cur_params = s->memcg_params;
2974
Qiang Huangf35c3a82013-11-12 15:08:22 -08002975 VM_BUG_ON(!is_root_cache(s));
Glauber Costa55007d82012-12-18 14:22:38 -08002976
2977 if (num_groups > memcg_limited_groups_array_size) {
2978 int i;
Vladimir Davydovf8570262014-01-23 15:53:06 -08002979 struct memcg_cache_params *new_params;
Glauber Costa55007d82012-12-18 14:22:38 -08002980 ssize_t size = memcg_caches_array_size(num_groups);
2981
2982 size *= sizeof(void *);
Andrey Vagin90c7a792013-09-11 14:22:18 -07002983 size += offsetof(struct memcg_cache_params, memcg_caches);
Glauber Costa55007d82012-12-18 14:22:38 -08002984
Vladimir Davydovf8570262014-01-23 15:53:06 -08002985 new_params = kzalloc(size, GFP_KERNEL);
2986 if (!new_params)
Glauber Costa55007d82012-12-18 14:22:38 -08002987 return -ENOMEM;
Glauber Costa55007d82012-12-18 14:22:38 -08002988
Vladimir Davydovf8570262014-01-23 15:53:06 -08002989 new_params->is_root_cache = true;
Glauber Costa55007d82012-12-18 14:22:38 -08002990
2991 /*
2992 * There is the chance it will be bigger than
2993 * memcg_limited_groups_array_size, if we failed an allocation
2994 * in a cache, in which case all caches updated before it, will
2995 * have a bigger array.
2996 *
2997 * But if that is the case, the data after
2998 * memcg_limited_groups_array_size is certainly unused
2999 */
3000 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3001 if (!cur_params->memcg_caches[i])
3002 continue;
Vladimir Davydovf8570262014-01-23 15:53:06 -08003003 new_params->memcg_caches[i] =
Glauber Costa55007d82012-12-18 14:22:38 -08003004 cur_params->memcg_caches[i];
3005 }
3006
3007 /*
3008 * Ideally, we would wait until all caches succeed, and only
3009 * then free the old one. But this is not worth the extra
3010 * pointer per-cache we'd have to have for this.
3011 *
3012 * It is not a big deal if some caches are left with a size
3013 * bigger than the others. And all updates will reset this
3014 * anyway.
3015 */
Vladimir Davydovf8570262014-01-23 15:53:06 -08003016 rcu_assign_pointer(s->memcg_params, new_params);
3017 if (cur_params)
3018 kfree_rcu(cur_params, rcu_head);
Glauber Costa55007d82012-12-18 14:22:38 -08003019 }
3020 return 0;
3021}
3022
Vladimir Davydov363a0442014-01-23 15:52:56 -08003023int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
3024 struct kmem_cache *root_cache)
Glauber Costa2633d7a2012-12-18 14:22:34 -08003025{
Andrey Vagin90c7a792013-09-11 14:22:18 -07003026 size_t size;
Glauber Costa2633d7a2012-12-18 14:22:34 -08003027
3028 if (!memcg_kmem_enabled())
3029 return 0;
3030
Andrey Vagin90c7a792013-09-11 14:22:18 -07003031 if (!memcg) {
3032 size = offsetof(struct memcg_cache_params, memcg_caches);
Glauber Costa55007d82012-12-18 14:22:38 -08003033 size += memcg_limited_groups_array_size * sizeof(void *);
Andrey Vagin90c7a792013-09-11 14:22:18 -07003034 } else
3035 size = sizeof(struct memcg_cache_params);
Glauber Costa55007d82012-12-18 14:22:38 -08003036
Glauber Costa2633d7a2012-12-18 14:22:34 -08003037 s->memcg_params = kzalloc(size, GFP_KERNEL);
3038 if (!s->memcg_params)
3039 return -ENOMEM;
3040
Glauber Costa943a4512012-12-18 14:23:03 -08003041 if (memcg) {
Glauber Costa2633d7a2012-12-18 14:22:34 -08003042 s->memcg_params->memcg = memcg;
Glauber Costa943a4512012-12-18 14:23:03 -08003043 s->memcg_params->root_cache = root_cache;
Vladimir Davydov051dd462014-04-07 15:39:27 -07003044 css_get(&memcg->css);
Glauber Costa4ba902b2013-02-12 13:46:22 -08003045 } else
3046 s->memcg_params->is_root_cache = true;
3047
Glauber Costa2633d7a2012-12-18 14:22:34 -08003048 return 0;
3049}
3050
Vladimir Davydov363a0442014-01-23 15:52:56 -08003051void memcg_free_cache_params(struct kmem_cache *s)
3052{
Vladimir Davydov051dd462014-04-07 15:39:27 -07003053 if (!s->memcg_params)
3054 return;
3055 if (!s->memcg_params->is_root_cache)
3056 css_put(&s->memcg_params->memcg->css);
Vladimir Davydov363a0442014-01-23 15:52:56 -08003057 kfree(s->memcg_params);
3058}
3059
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003060static void memcg_register_cache(struct mem_cgroup *memcg,
3061 struct kmem_cache *root_cache)
Glauber Costa2633d7a2012-12-18 14:22:34 -08003062{
Vladimir Davydov93f39ee2014-06-04 16:08:24 -07003063 static char memcg_name_buf[NAME_MAX + 1]; /* protected by
3064 memcg_slab_mutex */
Vladimir Davydovbd673142014-06-04 16:07:40 -07003065 struct kmem_cache *cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08003066 int id;
3067
Vladimir Davydovbd673142014-06-04 16:07:40 -07003068 lockdep_assert_held(&memcg_slab_mutex);
3069
3070 id = memcg_cache_id(memcg);
Glauber Costad7f25f82012-12-18 14:22:40 -08003071
Vladimir Davydov2edefe12014-01-23 15:53:02 -08003072 /*
Vladimir Davydovbd673142014-06-04 16:07:40 -07003073 * Since per-memcg caches are created asynchronously on first
3074 * allocation (see memcg_kmem_get_cache()), several threads can try to
3075 * create the same cache, but only one of them may succeed.
Vladimir Davydov2edefe12014-01-23 15:53:02 -08003076 */
Vladimir Davydovbd673142014-06-04 16:07:40 -07003077 if (cache_from_memcg_idx(root_cache, id))
3078 return;
Vladimir Davydov2edefe12014-01-23 15:53:02 -08003079
Vladimir Davydov073ee1c2014-06-04 16:08:23 -07003080 cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003081 cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
Vladimir Davydovbd673142014-06-04 16:07:40 -07003082 /*
3083 * If we could not create a memcg cache, do not complain, because
3084 * that's not critical at all as we can always proceed with the root
3085 * cache.
3086 */
3087 if (!cachep)
3088 return;
3089
3090 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
Vladimir Davydov1aa13252014-01-23 15:52:58 -08003091
Vladimir Davydov1aa13252014-01-23 15:52:58 -08003092 /*
Vladimir Davydov959c8962014-01-23 15:52:59 -08003093 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3094 * barrier here to ensure nobody will see the kmem_cache partially
3095 * initialized.
Vladimir Davydov1aa13252014-01-23 15:52:58 -08003096 */
Vladimir Davydov959c8962014-01-23 15:52:59 -08003097 smp_wmb();
3098
Vladimir Davydovbd673142014-06-04 16:07:40 -07003099 BUG_ON(root_cache->memcg_params->memcg_caches[id]);
3100 root_cache->memcg_params->memcg_caches[id] = cachep;
Vladimir Davydov1aa13252014-01-23 15:52:58 -08003101}
3102
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003103static void memcg_unregister_cache(struct kmem_cache *cachep)
Vladimir Davydov1aa13252014-01-23 15:52:58 -08003104{
Vladimir Davydovbd673142014-06-04 16:07:40 -07003105 struct kmem_cache *root_cache;
Vladimir Davydov1aa13252014-01-23 15:52:58 -08003106 struct mem_cgroup *memcg;
3107 int id;
3108
Vladimir Davydovbd673142014-06-04 16:07:40 -07003109 lockdep_assert_held(&memcg_slab_mutex);
Glauber Costad7f25f82012-12-18 14:22:40 -08003110
Vladimir Davydovbd673142014-06-04 16:07:40 -07003111 BUG_ON(is_root_cache(cachep));
Vladimir Davydov2edefe12014-01-23 15:53:02 -08003112
Vladimir Davydovbd673142014-06-04 16:07:40 -07003113 root_cache = cachep->memcg_params->root_cache;
3114 memcg = cachep->memcg_params->memcg;
Vladimir Davydov96403da2014-01-23 15:53:01 -08003115 id = memcg_cache_id(memcg);
Glauber Costad7f25f82012-12-18 14:22:40 -08003116
Vladimir Davydovbd673142014-06-04 16:07:40 -07003117 BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
3118 root_cache->memcg_params->memcg_caches[id] = NULL;
Glauber Costad7f25f82012-12-18 14:22:40 -08003119
Vladimir Davydovbd673142014-06-04 16:07:40 -07003120 list_del(&cachep->memcg_params->list);
3121
3122 kmem_cache_destroy(cachep);
Glauber Costa2633d7a2012-12-18 14:22:34 -08003123}
3124
Glauber Costa0e9d92f2012-12-18 14:22:42 -08003125/*
3126 * During the creation of a new cache, we need to disable our accounting mechanism
3127 * altogether. This is true even if we are not creating, but rather just
3128 * enqueueing new caches to be created.
3129 *
3130 * This is because that process will trigger allocations; some visible, like
3131 * explicit kmallocs to auxiliary data structures, name strings and internal
3132 * cache structures; some well concealed, like INIT_WORK() that can allocate
3133 * objects during debug.
3134 *
3135 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3136 * to it. This may not be a bounded recursion: since the first cache creation
3137 * failed to complete (waiting on the allocation), we'll just try to create the
3138 * cache again, failing at the same point.
3139 *
3140 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3141 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3142 * inside the following two functions.
3143 */
3144static inline void memcg_stop_kmem_account(void)
3145{
3146 VM_BUG_ON(!current->mm);
3147 current->memcg_kmem_skip_account++;
3148}
3149
3150static inline void memcg_resume_kmem_account(void)
3151{
3152 VM_BUG_ON(!current->mm);
3153 current->memcg_kmem_skip_account--;
3154}
3155
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003156int __memcg_cleanup_cache_params(struct kmem_cache *s)
Glauber Costa7cf27982012-12-18 14:22:55 -08003157{
3158 struct kmem_cache *c;
Vladimir Davydovb8529902014-04-07 15:39:28 -07003159 int i, failed = 0;
Glauber Costa7cf27982012-12-18 14:22:55 -08003160
Vladimir Davydovbd673142014-06-04 16:07:40 -07003161 mutex_lock(&memcg_slab_mutex);
Qiang Huang7a67d7a2013-11-12 15:08:24 -08003162 for_each_memcg_cache_index(i) {
3163 c = cache_from_memcg_idx(s, i);
Glauber Costa7cf27982012-12-18 14:22:55 -08003164 if (!c)
3165 continue;
3166
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003167 memcg_unregister_cache(c);
Vladimir Davydovb8529902014-04-07 15:39:28 -07003168
3169 if (cache_from_memcg_idx(s, i))
3170 failed++;
Glauber Costa7cf27982012-12-18 14:22:55 -08003171 }
Vladimir Davydovbd673142014-06-04 16:07:40 -07003172 mutex_unlock(&memcg_slab_mutex);
Vladimir Davydovb8529902014-04-07 15:39:28 -07003173 return failed;
Glauber Costa7cf27982012-12-18 14:22:55 -08003174}
3175
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003176static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
Glauber Costa1f458cb2012-12-18 14:22:50 -08003177{
3178 struct kmem_cache *cachep;
Vladimir Davydovbd673142014-06-04 16:07:40 -07003179 struct memcg_cache_params *params, *tmp;
Glauber Costa1f458cb2012-12-18 14:22:50 -08003180
3181 if (!memcg_kmem_is_active(memcg))
3182 return;
3183
Vladimir Davydovbd673142014-06-04 16:07:40 -07003184 mutex_lock(&memcg_slab_mutex);
3185 list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
Glauber Costa1f458cb2012-12-18 14:22:50 -08003186 cachep = memcg_params_to_cache(params);
Vladimir Davydovbd673142014-06-04 16:07:40 -07003187 kmem_cache_shrink(cachep);
3188 if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003189 memcg_unregister_cache(cachep);
Glauber Costa1f458cb2012-12-18 14:22:50 -08003190 }
Vladimir Davydovbd673142014-06-04 16:07:40 -07003191 mutex_unlock(&memcg_slab_mutex);
Glauber Costa1f458cb2012-12-18 14:22:50 -08003192}
3193
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003194struct memcg_register_cache_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07003195 struct mem_cgroup *memcg;
3196 struct kmem_cache *cachep;
3197 struct work_struct work;
3198};
3199
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003200static void memcg_register_cache_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08003201{
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003202 struct memcg_register_cache_work *cw =
3203 container_of(w, struct memcg_register_cache_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07003204 struct mem_cgroup *memcg = cw->memcg;
3205 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08003206
Vladimir Davydovbd673142014-06-04 16:07:40 -07003207 mutex_lock(&memcg_slab_mutex);
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003208 memcg_register_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07003209 mutex_unlock(&memcg_slab_mutex);
3210
Vladimir Davydov5722d092014-04-07 15:39:24 -07003211 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08003212 kfree(cw);
3213}
3214
3215/*
3216 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08003217 */
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003218static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
3219 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08003220{
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003221 struct memcg_register_cache_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08003222
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003223 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Li Zefanca0dde92013-04-29 15:08:57 -07003224 if (cw == NULL) {
3225 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08003226 return;
3227 }
3228
3229 cw->memcg = memcg;
3230 cw->cachep = cachep;
3231
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003232 INIT_WORK(&cw->work, memcg_register_cache_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08003233 schedule_work(&cw->work);
3234}
3235
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003236static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
3237 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08003238{
3239 /*
3240 * We need to stop accounting when we kmalloc, because if the
3241 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003242 * in __memcg_schedule_register_cache will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08003243 *
3244 * However, it is better to enclose the whole function. Depending on
3245 * the debugging options enabled, INIT_WORK(), for instance, can
3246 * trigger an allocation. This too, will make us recurse. Because at
3247 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3248 * the safest choice is to do it like this, wrapping the whole function.
3249 */
3250 memcg_stop_kmem_account();
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003251 __memcg_schedule_register_cache(memcg, cachep);
Glauber Costa0e9d92f2012-12-18 14:22:42 -08003252 memcg_resume_kmem_account();
3253}
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07003254
3255int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
3256{
3257 int res;
3258
3259 res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
3260 PAGE_SIZE << order);
3261 if (!res)
3262 atomic_add(1 << order, &cachep->memcg_params->nr_pages);
3263 return res;
3264}
3265
3266void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
3267{
3268 memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
3269 atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
3270}
3271
Glauber Costad7f25f82012-12-18 14:22:40 -08003272/*
3273 * Return the kmem_cache we're supposed to use for a slab allocation.
3274 * We try to use the current memcg's version of the cache.
3275 *
3276 * If the cache does not exist yet, i.e. we are the first user of it,
3277 * we either create it immediately, if possible, or create it asynchronously
3278 * in a workqueue.
3279 * In the latter case, we will let the current allocation go through with
3280 * the original cache.
3281 *
3282 * Can't be called in interrupt context or from kernel threads.
3283 * This function needs to be called with rcu_read_lock() held.
3284 */
3285struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3286 gfp_t gfp)
3287{
3288 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08003289 struct kmem_cache *memcg_cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08003290
3291 VM_BUG_ON(!cachep->memcg_params);
3292 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3293
Glauber Costa0e9d92f2012-12-18 14:22:42 -08003294 if (!current->mm || current->memcg_kmem_skip_account)
3295 return cachep;
3296
Glauber Costad7f25f82012-12-18 14:22:40 -08003297 rcu_read_lock();
3298 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
Glauber Costad7f25f82012-12-18 14:22:40 -08003299
3300 if (!memcg_can_account_kmem(memcg))
Li Zefanca0dde92013-04-29 15:08:57 -07003301 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08003302
Vladimir Davydov959c8962014-01-23 15:52:59 -08003303 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3304 if (likely(memcg_cachep)) {
3305 cachep = memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07003306 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08003307 }
3308
Li Zefanca0dde92013-04-29 15:08:57 -07003309 /* The corresponding put will be done in the workqueue. */
Tejun Heoec903c02014-05-13 12:11:01 -04003310 if (!css_tryget_online(&memcg->css))
Li Zefanca0dde92013-04-29 15:08:57 -07003311 goto out;
3312 rcu_read_unlock();
3313
3314 /*
3315 * If we are in a safe context (can wait, and not in interrupt
3316 * context), we could be predictable and return right away.
3317 * This would guarantee that the allocation being performed
3318 * already belongs in the new cache.
3319 *
3320 * However, there are some clashes that can arise from locking.
3321 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003322 * memcg_create_kmem_cache, this means no further allocation
3323 * could happen with the slab_mutex held. So it's better to
3324 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07003325 */
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003326 memcg_schedule_register_cache(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07003327 return cachep;
3328out:
3329 rcu_read_unlock();
3330 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08003331}
Glauber Costad7f25f82012-12-18 14:22:40 -08003332
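/*
 * Sketch of how a slab allocator is expected to pick up the per-memcg
 * cache on its allocation fast path. The hook below is hypothetical
 * (SLAB and SLUB differ in detail); the memcg_kmem_get_cache() wrapper
 * in include/linux/memcontrol.h bails out by itself whenever kmem
 * accounting does not apply to the current context:
 *
 *	static inline struct kmem_cache *pre_alloc_hook(struct kmem_cache *s,
 *							gfp_t flags)
 *	{
 *		return memcg_kmem_get_cache(s, flags);
 *	}
 *
 * The object is then allocated from the returned cache, so the memory
 * ends up charged to the cache owned by the current task's memcg.
 */
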
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003333/*
3334 * We need to verify if the allocation against current->mm->owner's memcg is
3335 * possible for the given order. But the page is not allocated yet, so we'll
3336 * need a further commit step to do the final arrangements.
3337 *
3338 * It is possible for the task to switch cgroups in the meantime, so at
3339 * commit time, we can't rely on task conversion any longer. We'll then use
3340 * the handle argument to return to the caller which cgroup we should commit
3341 * against. We could also return the memcg directly and avoid the pointer
3342 * passing, but a boolean return value gives better semantics considering
3343 * the compiled-out case as well.
3344 *
3345 * Returning true means the allocation is possible.
3346 */
3347bool
3348__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3349{
3350 struct mem_cgroup *memcg;
3351 int ret;
3352
3353 *_memcg = NULL;
Glauber Costa6d42c232013-07-08 16:00:00 -07003354
3355 /*
3356 * Disabling accounting is only relevant for some specific memcg
3357 * internal allocations. Therefore we would initially not have such a
Vladimir Davydov52383432014-06-04 16:06:39 -07003358 * check here, since direct calls to the page allocator that are
3359 * accounted to kmemcg (alloc_kmem_pages and friends) only happen
3360 * outside memcg core. We are mostly concerned with cache allocations,
3361 * and by having this test at memcg_kmem_get_cache, we are already able
3362 * to relay the allocation to the root cache and bypass the memcg cache
3363 * altogether.
Glauber Costa6d42c232013-07-08 16:00:00 -07003364 *
3365 * There is one exception, though: the SLUB allocator does not create
3366 * large order caches, but rather service large kmallocs directly from
3367 * the page allocator. Therefore, the following sequence when backed by
3368 * the SLUB allocator:
3369 *
Andrew Mortonf894ffa2013-09-12 15:13:35 -07003370 * memcg_stop_kmem_account();
3371 * kmalloc(<large_number>)
3372 * memcg_resume_kmem_account();
Glauber Costa6d42c232013-07-08 16:00:00 -07003373 *
3374 * would effectively ignore the fact that we should skip accounting,
3375 * since it will drive us directly to this function without passing
3376 * through the cache selector memcg_kmem_get_cache. Such large
3377 * allocations are extremely rare but can happen, for instance, for the
3378 * cache arrays. We bring this test here.
3379 */
3380 if (!current->mm || current->memcg_kmem_skip_account)
3381 return true;
3382
Johannes Weinerdf381972014-04-07 15:37:43 -07003383 memcg = get_mem_cgroup_from_mm(current->mm);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003384
3385 if (!memcg_can_account_kmem(memcg)) {
3386 css_put(&memcg->css);
3387 return true;
3388 }
3389
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003390 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3391 if (!ret)
3392 *_memcg = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003393
3394 css_put(&memcg->css);
3395 return (ret == 0);
3396}
3397
3398void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3399 int order)
3400{
3401 struct page_cgroup *pc;
3402
3403 VM_BUG_ON(mem_cgroup_is_root(memcg));
3404
3405 /* The page allocation failed. Revert */
3406 if (!page) {
3407 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003408 return;
3409 }
3410
3411 pc = lookup_page_cgroup(page);
3412 lock_page_cgroup(pc);
3413 pc->mem_cgroup = memcg;
3414 SetPageCgroupUsed(pc);
3415 unlock_page_cgroup(pc);
3416}
3417
3418void __memcg_kmem_uncharge_pages(struct page *page, int order)
3419{
3420 struct mem_cgroup *memcg = NULL;
3421 struct page_cgroup *pc;
3422
3423
3424 pc = lookup_page_cgroup(page);
3425 /*
3426 * Fast unlocked return. Theoretically might have changed, have to
3427 * check again after locking.
3428 */
3429 if (!PageCgroupUsed(pc))
3430 return;
3431
3432 lock_page_cgroup(pc);
3433 if (PageCgroupUsed(pc)) {
3434 memcg = pc->mem_cgroup;
3435 ClearPageCgroupUsed(pc);
3436 }
3437 unlock_page_cgroup(pc);
3438
3439 /*
3440 * We trust that the allocation is valid only if there is a memcg
3441 * associated with the page.
3442 */
3443 if (!memcg)
3444 return;
3445
Sasha Levin309381fea2014-01-23 15:52:54 -08003446 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003447 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003448}
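
/*
 * Sketch of how the charge/commit/uncharge trio above is expected to be
 * driven from a page allocation site, via the memcg_kmem_newpage_charge(),
 * memcg_kmem_commit_charge() and memcg_kmem_uncharge_pages() wrappers in
 * include/linux/memcontrol.h. The function below is an illustrative
 * caller, not necessarily the exact code used by the page allocator:
 *
 *	struct page *charged_kmem_alloc(gfp_t gfp_mask, unsigned int order)
 *	{
 *		struct page *page;
 *		struct mem_cgroup *memcg = NULL;
 *
 *		if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *			return NULL;
 *		page = alloc_pages(gfp_mask, order);
 *		memcg_kmem_commit_charge(page, memcg, order);
 *		return page;
 *	}
 *
 * The commit step reverts the charge if the allocation failed; on the
 * freeing side, memcg_kmem_uncharge_pages() is called before the pages go
 * back to the buddy allocator.
 */
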
Glauber Costa1f458cb2012-12-18 14:22:50 -08003449#else
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07003450static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
Glauber Costa1f458cb2012-12-18 14:22:50 -08003451{
3452}
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08003453#endif /* CONFIG_MEMCG_KMEM */
3454
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003455#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3456
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07003457#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003458/*
3459 * Because tail pages are not marked as "used", set it. We're under
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003460 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3461 * charge/uncharge will never happen and move_account() is done under
3462 * compound_lock(), so we don't have to take care of races.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003463 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003464void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003465{
3466 struct page_cgroup *head_pc = lookup_page_cgroup(head);
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003467 struct page_cgroup *pc;
David Rientjesb070e652013-05-07 16:18:09 -07003468 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003469 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003470
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08003471 if (mem_cgroup_disabled())
3472 return;
David Rientjesb070e652013-05-07 16:18:09 -07003473
3474 memcg = head_pc->mem_cgroup;
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003475 for (i = 1; i < HPAGE_PMD_NR; i++) {
3476 pc = head_pc + i;
David Rientjesb070e652013-05-07 16:18:09 -07003477 pc->mem_cgroup = memcg;
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08003478 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3479 }
David Rientjesb070e652013-05-07 16:18:09 -07003480 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3481 HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003482}
Hugh Dickins12d27102012-01-12 17:19:52 -08003483#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08003484
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003485/**
Johannes Weinerde3638d2011-03-23 16:42:28 -07003486 * mem_cgroup_move_account - move account of the page
Johannes Weiner5564e882011-03-23 16:42:29 -07003487 * @page: the page
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003488 * @nr_pages: number of regular pages (>1 for huge pages)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003489 * @pc: page_cgroup of the page.
3490 * @from: mem_cgroup which the page is moved from.
3491 * @to: mem_cgroup which the page is moved to. @from != @to.
3492 *
3493	 * The caller must confirm the following.
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003494 * - page is not on LRU (isolate_page() is useful.)
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003495 * - compound_lock is held when nr_pages > 1
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003496 *
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07003497 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3498 * from old cgroup.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003499 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003500static int mem_cgroup_move_account(struct page *page,
3501 unsigned int nr_pages,
3502 struct page_cgroup *pc,
3503 struct mem_cgroup *from,
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07003504 struct mem_cgroup *to)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003505{
Johannes Weinerde3638d2011-03-23 16:42:28 -07003506 unsigned long flags;
3507 int ret;
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07003508 bool anon = PageAnon(page);
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08003509
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003510 VM_BUG_ON(from == to);
Sasha Levin309381fea2014-01-23 15:52:54 -08003511 VM_BUG_ON_PAGE(PageLRU(page), page);
Johannes Weinerde3638d2011-03-23 16:42:28 -07003512 /*
3513 * The page is isolated from LRU. So, collapse function
3514 * will not handle this page. But page splitting can happen.
3515 * Do this check under compound_page_lock(). The caller should
3516 * hold it.
3517 */
3518 ret = -EBUSY;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003519 if (nr_pages > 1 && !PageTransHuge(page))
Johannes Weinerde3638d2011-03-23 16:42:28 -07003520 goto out;
3521
3522 lock_page_cgroup(pc);
3523
3524 ret = -EINVAL;
3525 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3526 goto unlock;
3527
KAMEZAWA Hiroyuki312734c02012-03-21 16:34:24 -07003528 move_lock_mem_cgroup(from, &flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003529
Johannes Weiner59d1d252014-04-07 15:37:40 -07003530 if (!anon && page_mapped(page)) {
3531 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3532 nr_pages);
3533 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3534 nr_pages);
3535 }
Sha Zhengju3ea67d02013-09-12 15:13:53 -07003536
Johannes Weiner59d1d252014-04-07 15:37:40 -07003537 if (PageWriteback(page)) {
3538 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3539 nr_pages);
3540 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3541 nr_pages);
3542 }
Sha Zhengju3ea67d02013-09-12 15:13:53 -07003543
David Rientjesb070e652013-05-07 16:18:09 -07003544 mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
Balbir Singhd69b0422009-06-17 16:26:34 -07003545
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08003546 /* caller should have done css_get */
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003547 pc->mem_cgroup = to;
David Rientjesb070e652013-05-07 16:18:09 -07003548 mem_cgroup_charge_statistics(to, page, anon, nr_pages);
KAMEZAWA Hiroyuki312734c02012-03-21 16:34:24 -07003549 move_unlock_mem_cgroup(from, &flags);
Johannes Weinerde3638d2011-03-23 16:42:28 -07003550 ret = 0;
3551unlock:
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08003552 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukid2265e62010-03-10 15:22:31 -08003553 /*
3554 * check events
3555 */
Johannes Weiner5564e882011-03-23 16:42:29 -07003556 memcg_check_events(to, page);
3557 memcg_check_events(from, page);
Johannes Weinerde3638d2011-03-23 16:42:28 -07003558out:
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003559 return ret;
3560}
3561
Michal Hocko2ef37d32012-10-26 13:37:30 +02003562/**
3563 * mem_cgroup_move_parent - moves page to the parent group
3564 * @page: the page to move
3565 * @pc: page_cgroup of the page
3566 * @child: page's cgroup
3567 *
3568 * move charges to its parent or the root cgroup if the group has no
3569 * parent (aka use_hierarchy==0).
3570 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3571 * mem_cgroup_move_account fails) the failure is always temporary and
3572 * it signals a race with a page removal/uncharge or migration. In the
3573 * first case the page is on the way out and it will vanish from the LRU
3574 * on the next attempt and the call should be retried later.
3575 * Isolation from the LRU fails only if page has been isolated from
3576 * the LRU since we looked at it and that usually means either global
3577 * reclaim or migration going on. The page will either get back to the
3578 * LRU or vanish.
3579	 * Finally, mem_cgroup_move_account fails only if the page got uncharged
3580 * (!PageCgroupUsed) or moved to a different group. The page will
3581 * disappear in the next attempt.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003582 */
Johannes Weiner5564e882011-03-23 16:42:29 -07003583static int mem_cgroup_move_parent(struct page *page,
3584 struct page_cgroup *pc,
KAMEZAWA Hiroyuki6068bf02012-07-31 16:42:45 -07003585 struct mem_cgroup *child)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003586{
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003587 struct mem_cgroup *parent;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003588 unsigned int nr_pages;
Andrew Morton4be44892011-03-23 16:42:39 -07003589 unsigned long uninitialized_var(flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003590 int ret;
3591
Michal Hockod8423012012-10-26 13:37:29 +02003592 VM_BUG_ON(mem_cgroup_is_root(child));
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003593
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08003594 ret = -EBUSY;
3595 if (!get_page_unless_zero(page))
3596 goto out;
3597 if (isolate_lru_page(page))
3598 goto put;
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08003599
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003600 nr_pages = hpage_nr_pages(page);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003601
KAMEZAWA Hiroyukicc926f72012-05-29 15:07:04 -07003602 parent = parent_mem_cgroup(child);
3603 /*
3604 * If no parent, move charges to root cgroup.
3605 */
3606 if (!parent)
3607 parent = root_mem_cgroup;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003608
Michal Hocko2ef37d32012-10-26 13:37:30 +02003609 if (nr_pages > 1) {
Sasha Levin309381fea2014-01-23 15:52:54 -08003610 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08003611 flags = compound_lock_irqsave(page);
Michal Hocko2ef37d32012-10-26 13:37:30 +02003612 }
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08003613
KAMEZAWA Hiroyukicc926f72012-05-29 15:07:04 -07003614 ret = mem_cgroup_move_account(page, nr_pages,
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07003615 pc, child, parent);
KAMEZAWA Hiroyukicc926f72012-05-29 15:07:04 -07003616 if (!ret)
3617 __mem_cgroup_cancel_local_charge(child, nr_pages);
Jesper Juhl8dba4742011-01-25 15:07:24 -08003618
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003619 if (nr_pages > 1)
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08003620 compound_unlock_irqrestore(page, flags);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003621 putback_lru_page(page);
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08003622put:
Daisuke Nishimura40d58132009-01-15 13:51:12 -08003623 put_page(page);
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08003624out:
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003625 return ret;
3626}
3627
Michal Hockod715ae02014-04-07 15:37:46 -07003628int mem_cgroup_charge_anon(struct page *page,
Johannes Weiner1bec6b32014-04-07 15:37:41 -07003629 struct mm_struct *mm, gfp_t gfp_mask)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003630{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003631 unsigned int nr_pages = 1;
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003632 struct mem_cgroup *memcg;
Andrea Arcangeliec168512011-01-13 15:46:56 -08003633
Johannes Weiner1bec6b32014-04-07 15:37:41 -07003634 if (mem_cgroup_disabled())
3635 return 0;
3636
3637 VM_BUG_ON_PAGE(page_mapped(page), page);
3638 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3639 VM_BUG_ON(!mm);
3640
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08003641 if (PageTransHuge(page)) {
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003642 nr_pages <<= compound_order(page);
Sasha Levin309381fea2014-01-23 15:52:54 -08003643 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08003644 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003645
Michal Hocko0029e192014-08-06 16:05:53 -07003646 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages);
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003647 if (!memcg)
3648 return -ENOMEM;
Johannes Weiner1bec6b32014-04-07 15:37:41 -07003649 __mem_cgroup_commit_charge(memcg, page, nr_pages,
3650 MEM_CGROUP_CHARGE_TYPE_ANON, false);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003651 return 0;
3652}
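
/*
 * Illustrative sketch, not taken from any in-tree caller: an anonymous page
 * must be charged *before* it is mapped into a page table, which is what the
 * page_mapped() assertion above enforces.  fault_alloc_page() and
 * fault_map_page() below are made-up placeholders for the usual fault-path
 * steps:
 *
 *	page = fault_alloc_page(vma, address);
 *	if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL)) {
 *		put_page(page);
 *		return VM_FAULT_OOM;
 *	}
 *	fault_map_page(vma, address, page);
 *
 * The charge failure is handled before the page becomes visible to anyone
 * else, so no uncharge is needed on the error path.
 */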
3653
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08003654/*
3655 * While swap-in, try_charge -> commit or cancel, the page is locked.
3656 * And when try_charge() successfully returns, one refcnt to memcg without
Uwe Kleine-König21ae2952009-10-07 15:21:09 +02003657 * struct page_cgroup is acquired. This refcnt will be consumed by
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08003658 * "commit()" or removed by "cancel()"
3659 */
Johannes Weiner0435a2f2012-07-31 16:45:43 -07003660static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3661 struct page *page,
3662 gfp_t mask,
3663 struct mem_cgroup **memcgp)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003664{
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003665 struct mem_cgroup *memcg = NULL;
Johannes Weiner90deb782012-07-31 16:45:47 -07003666 struct page_cgroup *pc;
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08003667 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003668
Johannes Weiner90deb782012-07-31 16:45:47 -07003669 pc = lookup_page_cgroup(page);
3670 /*
3671 * Every swap fault against a single page tries to charge the
3672 * page, bail as early as possible. shmem_unuse() encounters
3673 * already charged pages, too. The USED bit is protected by
3674 * the page lock, which serializes swap cache removal, which
3675 * in turn serializes uncharging.
3676 */
3677 if (PageCgroupUsed(pc))
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003678 goto out;
3679 if (do_swap_account)
3680 memcg = try_get_mem_cgroup_from_page(page);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003681 if (!memcg)
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003682 memcg = get_mem_cgroup_from_mm(mm);
Michal Hocko0029e192014-08-06 16:05:53 -07003683 ret = mem_cgroup_try_charge(memcg, mask, 1);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003684 css_put(&memcg->css);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08003685 if (ret == -EINTR)
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003686 memcg = root_mem_cgroup;
3687 else if (ret)
3688 return ret;
3689out:
3690 *memcgp = memcg;
3691 return 0;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003692}
3693
Johannes Weiner0435a2f2012-07-31 16:45:43 -07003694int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3695 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3696{
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003697 if (mem_cgroup_disabled()) {
3698 *memcgp = NULL;
Johannes Weiner0435a2f2012-07-31 16:45:43 -07003699 return 0;
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003700 }
Johannes Weinerbdf4f4d2012-07-31 16:45:50 -07003701 /*
3702 * A racing thread's fault, or swapoff, may have already
3703 * updated the pte, and even removed page from swap cache: in
3704 * those cases unuse_pte()'s pte_same() test will fail; but
3705 * there's also a KSM case which does need to charge the page.
3706 */
3707 if (!PageSwapCache(page)) {
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003708 struct mem_cgroup *memcg;
Johannes Weinerbdf4f4d2012-07-31 16:45:50 -07003709
Michal Hocko0029e192014-08-06 16:05:53 -07003710 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1);
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003711 if (!memcg)
3712 return -ENOMEM;
3713 *memcgp = memcg;
3714 return 0;
Johannes Weinerbdf4f4d2012-07-31 16:45:50 -07003715 }
Johannes Weiner0435a2f2012-07-31 16:45:43 -07003716 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3717}
3718
Johannes Weiner827a03d2012-07-31 16:45:36 -07003719void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3720{
3721 if (mem_cgroup_disabled())
3722 return;
3723 if (!memcg)
3724 return;
3725 __mem_cgroup_cancel_charge(memcg, 1);
3726}
3727
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07003728static void
Johannes Weiner72835c82012-01-12 17:18:32 -08003729__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07003730 enum charge_type ctype)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003731{
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08003732 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003733 return;
Johannes Weiner72835c82012-01-12 17:18:32 -08003734 if (!memcg)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003735 return;
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07003736
Johannes Weinerce587e62012-04-24 20:22:33 +02003737 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003738 /*
3739	 * Now the swap is in memory. This means this page may be
3740	 * counted both as mem and swap, i.e. double-counted.
KAMEZAWA Hiroyuki03f3c432009-01-07 18:08:31 -08003741 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3742 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
3743	 * may call delete_from_swap_cache() before we reach here.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003744 */
KAMEZAWA Hiroyuki03f3c432009-01-07 18:08:31 -08003745 if (do_swap_account && PageSwapCache(page)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003746 swp_entry_t ent = {.val = page_private(page)};
Hugh Dickins86493002012-05-29 15:06:52 -07003747 mem_cgroup_uncharge_swap(ent);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003748 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003749}
3750
Johannes Weiner72835c82012-01-12 17:18:32 -08003751void mem_cgroup_commit_charge_swapin(struct page *page,
3752 struct mem_cgroup *memcg)
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07003753{
Johannes Weiner72835c82012-01-12 17:18:32 -08003754 __mem_cgroup_commit_charge_swapin(page, memcg,
Kamezawa Hiroyuki41326c12012-07-31 16:41:40 -07003755 MEM_CGROUP_CHARGE_TYPE_ANON);
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07003756}
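
/*
 * Illustrative sketch, not taken from any in-tree caller: the swap-in charge
 * protocol described above is try_charge -> commit or cancel.  In a
 * do_swap_page()-style caller (all names other than the mem_cgroup_*_swapin()
 * calls are placeholders) the sequence is roughly:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_oom;
 *	...
 *	if (pte_changed_under_us)
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * The commit path also fixes up the mem/memsw double count, as noted in
 * __mem_cgroup_commit_charge_swapin() above.
 */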
3757
Michal Hockod715ae02014-04-07 15:37:46 -07003758int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
Johannes Weiner827a03d2012-07-31 16:45:36 -07003759 gfp_t gfp_mask)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003760{
Johannes Weiner827a03d2012-07-31 16:45:36 -07003761 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003762 struct mem_cgroup *memcg;
Johannes Weiner827a03d2012-07-31 16:45:36 -07003763 int ret;
3764
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08003765 if (mem_cgroup_disabled())
Johannes Weiner827a03d2012-07-31 16:45:36 -07003766 return 0;
3767 if (PageCompound(page))
3768 return 0;
3769
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003770 if (PageSwapCache(page)) { /* shmem */
Johannes Weiner0435a2f2012-07-31 16:45:43 -07003771 ret = __mem_cgroup_try_charge_swapin(mm, page,
3772 gfp_mask, &memcg);
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003773 if (ret)
3774 return ret;
3775 __mem_cgroup_commit_charge_swapin(page, memcg, type);
3776 return 0;
Johannes Weiner827a03d2012-07-31 16:45:36 -07003777 }
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003778
Michal Hocko0029e192014-08-06 16:05:53 -07003779 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1);
Michal Hocko6f6acb02014-05-22 11:54:19 -07003780 if (!memcg)
3781 return -ENOMEM;
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07003782 __mem_cgroup_commit_charge(memcg, page, 1, type, false);
3783 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003784}
3785
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003786static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003787 unsigned int nr_pages,
3788 const enum charge_type ctype)
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003789{
3790 struct memcg_batch_info *batch = NULL;
3791 bool uncharge_memsw = true;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003792
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003793 /* If swapout, usage of swap doesn't decrease */
3794 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3795 uncharge_memsw = false;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003796
3797 batch = &current->memcg_batch;
3798 /*
3799	 * Usually, we do css_get() when we remember a memcg pointer.
3800 * But in this case, we keep res->usage until end of a series of
3801 * uncharges. Then, it's ok to ignore memcg's refcnt.
3802 */
3803 if (!batch->memcg)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003804 batch->memcg = memcg;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003805 /*
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003806 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003807 * In those cases, all pages freed continuously can be expected to be in
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003808	 * the same cgroup and we have a chance to coalesce uncharges.
3809	 * But we do uncharge one by one if this task is killed by OOM (TIF_MEMDIE)
3810 * because we want to do uncharge as soon as possible.
3811 */
3812
3813 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
3814 goto direct_uncharge;
3815
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003816 if (nr_pages > 1)
Andrea Arcangeliec168512011-01-13 15:46:56 -08003817 goto direct_uncharge;
3818
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003819 /*
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003820	 * In the typical case, batch->memcg == memcg. This means we can
3821	 * merge a series of uncharges into a single res_counter uncharge.
3822	 * If not, we uncharge the res_counter one by one.
3823 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003824 if (batch->memcg != memcg)
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003825 goto direct_uncharge;
3826 /* remember freed charge and uncharge it later */
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07003827 batch->nr_pages++;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003828 if (uncharge_memsw)
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07003829 batch->memsw_nr_pages++;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003830 return;
3831direct_uncharge:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003832 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003833 if (uncharge_memsw)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003834 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
3835 if (unlikely(batch->memcg != memcg))
3836 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003837}
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003838
Balbir Singh8697d332008-02-07 00:13:59 -08003839/*
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003840 * uncharge if !page_mapped(page)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003841 */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003842static struct mem_cgroup *
Johannes Weiner0030f532012-07-31 16:45:25 -07003843__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
3844 bool end_migration)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003845{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003846 struct mem_cgroup *memcg = NULL;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003847 unsigned int nr_pages = 1;
3848 struct page_cgroup *pc;
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07003849 bool anon;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003850
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08003851 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003852 return NULL;
Balbir Singh40779602008-04-04 14:29:59 -07003853
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08003854 if (PageTransHuge(page)) {
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003855 nr_pages <<= compound_order(page);
Sasha Levin309381fea2014-01-23 15:52:54 -08003856 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08003857 }
Balbir Singh8697d332008-02-07 00:13:59 -08003858 /*
Balbir Singh3c541e12008-02-07 00:14:41 -08003859 * Check if our page_cgroup is valid
Balbir Singh8697d332008-02-07 00:13:59 -08003860 */
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003861 pc = lookup_page_cgroup(page);
Johannes Weinercfa44942012-01-12 17:18:38 -08003862 if (unlikely(!PageCgroupUsed(pc)))
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003863 return NULL;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003864
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003865 lock_page_cgroup(pc);
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003866
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003867 memcg = pc->mem_cgroup;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003868
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003869 if (!PageCgroupUsed(pc))
3870 goto unlock_out;
3871
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07003872 anon = PageAnon(page);
3873
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003874 switch (ctype) {
Kamezawa Hiroyuki41326c12012-07-31 16:41:40 -07003875 case MEM_CGROUP_CHARGE_TYPE_ANON:
KAMEZAWA Hiroyuki2ff76f12012-03-21 16:34:25 -07003876 /*
3877	 * Generally PageAnon tells whether the anon statistics should be
3878	 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
3879	 * used before the page has reached the stage of being marked PageAnon.
3880 */
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07003881 anon = true;
3882 /* fallthrough */
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07003883 case MEM_CGROUP_CHARGE_TYPE_DROP:
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003884 /* See mem_cgroup_prepare_migration() */
Johannes Weiner0030f532012-07-31 16:45:25 -07003885 if (page_mapped(page))
3886 goto unlock_out;
3887 /*
3888 * Pages under migration may not be uncharged. But
3889 * end_migration() /must/ be the one uncharging the
3890 * unused post-migration page and so it has to call
3891 * here with the migration bit still set. See the
3892 * res_counter handling below.
3893 */
3894 if (!end_migration && PageCgroupMigration(pc))
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003895 goto unlock_out;
3896 break;
3897 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
3898 if (!PageAnon(page)) { /* Shared memory */
3899 if (page->mapping && !page_is_file_cache(page))
3900 goto unlock_out;
3901 } else if (page_mapped(page)) /* Anon */
3902 goto unlock_out;
3903 break;
3904 default:
3905 break;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003906 }
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003907
David Rientjesb070e652013-05-07 16:18:09 -07003908 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07003909
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003910 ClearPageCgroupUsed(pc);
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08003911 /*
3912 * pc->mem_cgroup is not cleared here. It will be accessed when it's
3913	 * freed from LRU. This is safe because an uncharged page is expected not
3914	 * to be reused (it is freed soon). The exception is SwapCache, which is handled by
3915 * special functions.
3916 */
Hugh Dickinsb9c565d2008-03-04 14:29:11 -08003917
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003918 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003919 /*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003920 * even after unlock, we have memcg->res.usage here and this memcg
Li Zefan40503772013-07-08 16:00:34 -07003921 * will never be freed, so it's safe to call css_get().
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003922 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003923 memcg_check_events(memcg, page);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003924 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003925 mem_cgroup_swap_statistics(memcg, true);
Li Zefan40503772013-07-08 16:00:34 -07003926 css_get(&memcg->css);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003927 }
Johannes Weiner0030f532012-07-31 16:45:25 -07003928 /*
3929 * Migration does not charge the res_counter for the
3930 * replacement page, so leave it alone when phasing out the
3931 * page that is unused after the migration.
3932 */
Johannes Weiner05b84302014-08-06 16:05:59 -07003933 if (!end_migration)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003934 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003935
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003936 return memcg;
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003937
3938unlock_out:
3939 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003940 return NULL;
Balbir Singh3c541e12008-02-07 00:14:41 -08003941}
3942
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003943void mem_cgroup_uncharge_page(struct page *page)
3944{
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003945 /* early check. */
3946 if (page_mapped(page))
3947 return;
Sasha Levin309381fea2014-01-23 15:52:54 -08003948 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
Johannes Weiner28ccddf2013-05-24 15:55:15 -07003949 /*
3950 * If the page is in swap cache, uncharge should be deferred
3951 * to the swap path, which also properly accounts swap usage
3952 * and handles memcg lifetime.
3953 *
3954 * Note that this check is not stable and reclaim may add the
3955 * page to swap cache at any time after this. However, if the
3956 * page is not in swap cache by the time page->mapcount hits
3957 * 0, there won't be any page table references to the swap
3958 * slot, and reclaim will free it and not actually write the
3959 * page to disk.
3960 */
Johannes Weiner0c59b892012-07-31 16:45:31 -07003961 if (PageSwapCache(page))
3962 return;
Johannes Weiner0030f532012-07-31 16:45:25 -07003963 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003964}
3965
3966void mem_cgroup_uncharge_cache_page(struct page *page)
3967{
Sasha Levin309381fea2014-01-23 15:52:54 -08003968 VM_BUG_ON_PAGE(page_mapped(page), page);
3969 VM_BUG_ON_PAGE(page->mapping, page);
Johannes Weiner0030f532012-07-31 16:45:25 -07003970 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003971}
3972
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003973/*
3974	 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3975	 * In those cases, pages are freed continuously and we can expect them
3976	 * to be in the same memcg. Each of those callers itself limits the number of
3977	 * pages freed at once, so uncharge_start/end() is called properly.
3978	 * This may be called several (nested) times in a context.
3979 */
3980
3981void mem_cgroup_uncharge_start(void)
3982{
3983 current->memcg_batch.do_batch++;
3984 /* We can do nest. */
3985 if (current->memcg_batch.do_batch == 1) {
3986 current->memcg_batch.memcg = NULL;
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07003987 current->memcg_batch.nr_pages = 0;
3988 current->memcg_batch.memsw_nr_pages = 0;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003989 }
3990}
3991
3992void mem_cgroup_uncharge_end(void)
3993{
3994 struct memcg_batch_info *batch = &current->memcg_batch;
3995
3996 if (!batch->do_batch)
3997 return;
3998
3999 batch->do_batch--;
4000 if (batch->do_batch) /* If stacked, do nothing. */
4001 return;
4002
4003 if (!batch->memcg)
4004 return;
4005 /*
4006 * This "batch->memcg" is valid without any css_get/put etc...
4007 * bacause we hide charges behind us.
4008	 * because we hide charges behind us.
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07004009 if (batch->nr_pages)
4010 res_counter_uncharge(&batch->memcg->res,
4011 batch->nr_pages * PAGE_SIZE);
4012 if (batch->memsw_nr_pages)
4013 res_counter_uncharge(&batch->memcg->memsw,
4014 batch->memsw_nr_pages * PAGE_SIZE);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004015 memcg_oom_recover(batch->memcg);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08004016 /* forget this pointer (for sanity check) */
4017 batch->memcg = NULL;
4018}
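
/*
 * Illustrative sketch, not taken from any in-tree caller: the start/end pair
 * above brackets loops that uncharge many pages (unmap, truncate, invalidate)
 * so that the res_counter is touched once per batch instead of once per page.
 * The calls nest, so a mass-freeing loop (the iterator below is a
 * placeholder) only needs:
 *
 *	mem_cgroup_uncharge_start();
 *	for_each_page_being_freed(page)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * Each mem_cgroup_uncharge_page() call then normally just accumulates into
 * current->memcg_batch (see mem_cgroup_do_uncharge() above for the cases
 * that still uncharge directly); the coalesced res_counter_uncharge()
 * happens in mem_cgroup_uncharge_end().
 */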
4019
Daisuke Nishimurae767e052009-05-28 14:34:28 -07004020#ifdef CONFIG_SWAP
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004021/*
Daisuke Nishimurae767e052009-05-28 14:34:28 -07004022 * called after __delete_from_swap_cache() and drops the "page" account.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004023 * The memcg information is recorded in the swap_cgroup of "ent".
4024 */
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07004025void
4026mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08004027{
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004028 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07004029 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004030
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07004031 if (!swapout) /* this was a swap cache but the swap is unused ! */
4032 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4033
Johannes Weiner0030f532012-07-31 16:45:25 -07004034 memcg = __mem_cgroup_uncharge_common(page, ctype, false);
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07004035
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07004036 /*
4037 * record memcg information, if swapout && memcg != NULL,
Li Zefan40503772013-07-08 16:00:34 -07004038 * css_get() was called in uncharge().
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07004039 */
4040 if (do_swap_account && swapout && memcg)
Li Zefan34c00c32013-09-23 16:56:01 +08004041 swap_cgroup_record(ent, mem_cgroup_id(memcg));
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08004042}
Daisuke Nishimurae767e052009-05-28 14:34:28 -07004043#endif
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08004044
Andrew Mortonc255a452012-07-31 16:43:02 -07004045#ifdef CONFIG_MEMCG_SWAP
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004046/*
4047 * called from swap_entry_free(). remove record in swap_cgroup and
4048 * uncharge "memsw" account.
4049 */
4050void mem_cgroup_uncharge_swap(swp_entry_t ent)
4051{
4052 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07004053 unsigned short id;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004054
4055 if (!do_swap_account)
4056 return;
4057
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07004058 id = swap_cgroup_record(ent, 0);
4059 rcu_read_lock();
4060 memcg = mem_cgroup_lookup(id);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004061 if (memcg) {
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07004062 /*
Tejun Heoec903c02014-05-13 12:11:01 -04004063		 * We uncharge this because the swap entry is freed. This memcg can
4064		 * be an obsolete one. We avoid calling css_tryget_online().
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07004065 */
Johannes Weiner05b84302014-08-06 16:05:59 -07004066 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07004067 mem_cgroup_swap_statistics(memcg, false);
Li Zefan40503772013-07-08 16:00:34 -07004068 css_put(&memcg->css);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004069 }
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07004070 rcu_read_unlock();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004071}
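
/*
 * Illustrative sketch, not taken from the real swapfile code: per the comment
 * above, this is meant to run when a swap entry is finally freed, e.g. from
 * swap_entry_free().  The condition below is only a placeholder for "no swap
 * map users are left":
 *
 *	if (swap_count_is_now_zero)
 *		mem_cgroup_uncharge_swap(entry);
 */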
Daisuke Nishimura02491442010-03-10 15:22:17 -08004072
4073/**
4074 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4075 * @entry: swap entry to be moved
4076 * @from: mem_cgroup which the entry is moved from
4077 * @to: mem_cgroup which the entry is moved to
4078 *
4079 * It succeeds only when the swap_cgroup's record for this entry is the same
4080 * as the mem_cgroup's id of @from.
4081 *
4082 * Returns 0 on success, -EINVAL on failure.
4083 *
4084 * The caller must have charged to @to, IOW, called res_counter_charge() about
4085 * both res and memsw, and called css_get().
4086 */
4087static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07004088 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08004089{
4090 unsigned short old_id, new_id;
4091
Li Zefan34c00c32013-09-23 16:56:01 +08004092 old_id = mem_cgroup_id(from);
4093 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08004094
4095 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004096 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08004097 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004098 /*
4099 * This function is only called from task migration context now.
4100 * It postpones res_counter and refcount handling till the end
4101 * of task migration(mem_cgroup_clear_mc()) for performance
Li Zefan40503772013-07-08 16:00:34 -07004102 * improvement. But we cannot postpone css_get(to) because if
4103 * the process that has been moved to @to does swap-in, the
4104 * refcount of @to might be decreased to 0.
4105 *
4106 * We are in attach() phase, so the cgroup is guaranteed to be
4107 * alive, so we can just call css_get().
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004108 */
Li Zefan40503772013-07-08 16:00:34 -07004109 css_get(&to->css);
Daisuke Nishimura02491442010-03-10 15:22:17 -08004110 return 0;
4111 }
4112 return -EINVAL;
4113}
4114#else
4115static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07004116 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08004117{
4118 return -EINVAL;
4119}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004120#endif
4121
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08004122/*
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004123 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4124 * page belongs to.
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08004125 */
Johannes Weiner0030f532012-07-31 16:45:25 -07004126void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4127 struct mem_cgroup **memcgp)
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08004128{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004129 struct mem_cgroup *memcg = NULL;
Mel Gormanb32967f2012-11-19 12:35:47 +00004130 unsigned int nr_pages = 1;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07004131 struct page_cgroup *pc;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004132 enum charge_type ctype;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08004133
Johannes Weiner72835c82012-01-12 17:18:32 -08004134 *memcgp = NULL;
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07004135
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08004136 if (mem_cgroup_disabled())
Johannes Weiner0030f532012-07-31 16:45:25 -07004137 return;
Balbir Singh40779602008-04-04 14:29:59 -07004138
Mel Gormanb32967f2012-11-19 12:35:47 +00004139 if (PageTransHuge(page))
4140 nr_pages <<= compound_order(page);
4141
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07004142 pc = lookup_page_cgroup(page);
4143 lock_page_cgroup(pc);
4144 if (PageCgroupUsed(pc)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004145 memcg = pc->mem_cgroup;
4146 css_get(&memcg->css);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004147 /*
4148		 * When migrating an anonymous page, its mapcount goes down
4149 * to 0 and uncharge() will be called. But, even if it's fully
4150 * unmapped, migration may fail and this page has to be
4151 * charged again. We set MIGRATION flag here and delay uncharge
4152 * until end_migration() is called
4153 *
4154 * Corner Case Thinking
4155 * A)
4156		 * When the old page was mapped as Anon and it is unmapped and freed
4157		 * while migration is ongoing.
4158		 * If unmap finds the old page, uncharge() of it will be delayed
4159		 * until end_migration(). If unmap finds a new page, it's
4160		 * uncharged when it makes the mapcount go from 1 to 0. If the unmap code
4161		 * finds a swap_migration_entry, the new page will not be mapped
4162		 * and end_migration() will find it (mapcount == 0).
4163 *
4164 * B)
4165		 * When the old page was mapped but migration fails, the kernel
4166		 * remaps it. A charge for it is kept by the MIGRATION flag even
4167		 * if the mapcount goes down to 0. The remap can succeed
4168 * without charging it again.
4169 *
4170 * C)
4171 * The "old" page is under lock_page() until the end of
4172 * migration, so, the old page itself will not be swapped-out.
4173		 * If the new page is swapped out before end_migration(), our
4174		 * hook into the usual swap-out path will catch the event.
4175 */
4176 if (PageAnon(page))
4177 SetPageCgroupMigration(pc);
Hugh Dickinsb9c565d2008-03-04 14:29:11 -08004178 }
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07004179 unlock_page_cgroup(pc);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004180 /*
4181 * If the page is not charged at this point,
4182 * we return here.
4183 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004184 if (!memcg)
Johannes Weiner0030f532012-07-31 16:45:25 -07004185 return;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004186
Johannes Weiner72835c82012-01-12 17:18:32 -08004187 *memcgp = memcg;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004188 /*
4189 * We charge new page before it's used/mapped. So, even if unlock_page()
4190 * is called before end_migration, we can catch all events on this new
4191 * page. In the case new page is migrated but not remapped, new page's
4192 * mapcount will be finally 0 and we call uncharge in end_migration().
4193 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004194 if (PageAnon(page))
Kamezawa Hiroyuki41326c12012-07-31 16:41:40 -07004195 ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004196 else
Johannes Weiner62ba7442012-07-31 16:45:39 -07004197 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
Johannes Weiner0030f532012-07-31 16:45:25 -07004198 /*
4199 * The page is committed to the memcg, but it's not actually
4200 * charged to the res_counter since we plan on replacing the
4201 * old one and only one page is going to be left afterwards.
4202 */
Mel Gormanb32967f2012-11-19 12:35:47 +00004203 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07004204}
Hugh Dickinsfb59e9f2008-03-04 14:29:16 -08004205
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07004206/* remove redundant charge if migration failed*/
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004207void mem_cgroup_end_migration(struct mem_cgroup *memcg,
Daisuke Nishimura50de1dd2011-01-13 15:47:43 -08004208 struct page *oldpage, struct page *newpage, bool migration_ok)
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07004209{
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004210 struct page *used, *unused;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004211 struct page_cgroup *pc;
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07004212 bool anon;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004213
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004214 if (!memcg)
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004215 return;
Tejun Heob25ed602012-11-05 09:16:59 -08004216
Daisuke Nishimura50de1dd2011-01-13 15:47:43 -08004217 if (!migration_ok) {
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004218 used = oldpage;
4219 unused = newpage;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004220 } else {
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004221 used = newpage;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004222 unused = oldpage;
4223 }
Johannes Weiner0030f532012-07-31 16:45:25 -07004224 anon = PageAnon(used);
Johannes Weiner7d188952012-07-31 16:45:34 -07004225 __mem_cgroup_uncharge_common(unused,
4226 anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4227 : MEM_CGROUP_CHARGE_TYPE_CACHE,
4228 true);
Johannes Weiner0030f532012-07-31 16:45:25 -07004229 css_put(&memcg->css);
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07004230 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004231 * We disallowed uncharge of pages under migration because mapcount
4232	 * of the page goes down to zero, temporarily.
4233	 * Clear the flag and check whether the page should be charged.
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07004234 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004235 pc = lookup_page_cgroup(oldpage);
4236 lock_page_cgroup(pc);
4237 ClearPageCgroupMigration(pc);
4238 unlock_page_cgroup(pc);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004239
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004240 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004241 * If a page is a file cache, radix-tree replacement is very atomic
4242 * and we can skip this check. When it was an Anon page, its mapcount
4243	 * goes down to 0. But because we added the MIGRATION flag, it's not
4244	 * uncharged yet. There are several cases, but the page->mapcount check
4245	 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
4246	 * checking. (see prepare_charge() also)
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08004247 */
KAMEZAWA Hiroyukib24028572012-03-21 16:34:22 -07004248 if (anon)
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07004249 mem_cgroup_uncharge_page(used);
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08004250}
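
/*
 * Illustrative sketch, not taken from the real migration code: the two hooks
 * above bracket page migration.  A condensed unmap_and_move()-style sequence
 * (everything except the mem_cgroup_* calls is a placeholder) looks like:
 *
 *	struct mem_cgroup *memcg = NULL;
 *	bool migration_succeeded;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	migration_succeeded = copy_and_remap(page, newpage);
 *	mem_cgroup_end_migration(memcg, page, newpage, migration_succeeded);
 *
 * mem_cgroup_end_migration() is a no-op when @memcg is NULL, i.e. when the
 * old page was not charged at prepare time.
 */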
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004251
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004252/*
4253 * At replace page cache, newpage is not under any memcg but it's on
4254 * LRU. So, this function doesn't touch res_counter but handles LRU
4255 * in correct way. Both pages are locked so we cannot race with uncharge.
4256 */
4257void mem_cgroup_replace_page_cache(struct page *oldpage,
4258 struct page *newpage)
4259{
Hugh Dickinsbde05d12012-05-29 15:06:38 -07004260 struct mem_cgroup *memcg = NULL;
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004261 struct page_cgroup *pc;
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004262 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004263
4264 if (mem_cgroup_disabled())
4265 return;
4266
4267 pc = lookup_page_cgroup(oldpage);
4268 /* fix accounting on old pages */
4269 lock_page_cgroup(pc);
Hugh Dickinsbde05d12012-05-29 15:06:38 -07004270 if (PageCgroupUsed(pc)) {
4271 memcg = pc->mem_cgroup;
David Rientjesb070e652013-05-07 16:18:09 -07004272 mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
Hugh Dickinsbde05d12012-05-29 15:06:38 -07004273 ClearPageCgroupUsed(pc);
4274 }
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004275 unlock_page_cgroup(pc);
4276
Hugh Dickinsbde05d12012-05-29 15:06:38 -07004277 /*
4278 * When called from shmem_replace_page(), in some cases the
4279 * oldpage has already been charged, and in some cases not.
4280 */
4281 if (!memcg)
4282 return;
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004283 /*
4284 * Even if newpage->mapping was NULL before starting replacement,
4285 * the newpage may be on LRU(or pagevec for LRU) already. We lock
4286 * LRU while we overwrite pc->mem_cgroup.
4287 */
Johannes Weinerce587e62012-04-24 20:22:33 +02004288 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08004289}
4290
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07004291#ifdef CONFIG_DEBUG_VM
4292static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4293{
4294 struct page_cgroup *pc;
4295
4296 pc = lookup_page_cgroup(page);
Johannes Weinercfa44942012-01-12 17:18:38 -08004297 /*
4298 * Can be NULL while feeding pages into the page allocator for
4299 * the first time, i.e. during boot or memory hotplug;
4300 * or when mem_cgroup_disabled().
4301 */
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07004302 if (likely(pc) && PageCgroupUsed(pc))
4303 return pc;
4304 return NULL;
4305}
4306
4307bool mem_cgroup_bad_page_check(struct page *page)
4308{
4309 if (mem_cgroup_disabled())
4310 return false;
4311
4312 return lookup_page_cgroup_used(page) != NULL;
4313}
4314
4315void mem_cgroup_print_bad_page(struct page *page)
4316{
4317 struct page_cgroup *pc;
4318
4319 pc = lookup_page_cgroup_used(page);
4320 if (pc) {
Andrew Mortond0451972013-02-22 16:32:06 -08004321 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4322 pc, pc->flags, pc->mem_cgroup);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07004323 }
4324}
4325#endif
4326
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08004327static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004328 unsigned long long val)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004329{
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004330 int retry_count;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004331 u64 memswlimit, memlimit;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004332 int ret = 0;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004333 int children = mem_cgroup_count_children(memcg);
4334 u64 curusage, oldusage;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004335 int enlarge;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004336
4337 /*
4338 * For keeping hierarchical_reclaim simple, how long we should retry
4339	 * depends on the caller. We set our retry count to be a function
4340	 * of the number of children which we should visit in this loop.
4341 */
4342 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4343
4344 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004345
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004346 enlarge = 0;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004347 while (retry_count) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004348 if (signal_pending(current)) {
4349 ret = -EINTR;
4350 break;
4351 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004352 /*
4353		 * Rather than hide it all in some function, I do this in an
4354		 * open-coded manner, so you can see what this really does.
Wanpeng Liaaad1532012-07-31 16:43:23 -07004355 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004356 */
4357 mutex_lock(&set_limit_mutex);
4358 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4359 if (memswlimit < val) {
4360 ret = -EINVAL;
4361 mutex_unlock(&set_limit_mutex);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004362 break;
4363 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004364
4365 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4366 if (memlimit < val)
4367 enlarge = 1;
4368
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004369 ret = res_counter_set_limit(&memcg->res, val);
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07004370 if (!ret) {
4371 if (memswlimit == val)
4372 memcg->memsw_is_minimum = true;
4373 else
4374 memcg->memsw_is_minimum = false;
4375 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004376 mutex_unlock(&set_limit_mutex);
4377
4378 if (!ret)
4379 break;
4380
Johannes Weiner56600482012-01-12 17:17:59 -08004381 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4382 MEM_CGROUP_RECLAIM_SHRINK);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004383 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4384 /* Usage is reduced ? */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07004385 if (curusage >= oldusage)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004386 retry_count--;
4387 else
4388 oldusage = curusage;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004389 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004390 if (!ret && enlarge)
4391 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08004392
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004393 return ret;
4394}
4395
Li Zefan338c8432009-06-17 16:27:15 -07004396static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4397 unsigned long long val)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004398{
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004399 int retry_count;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004400 u64 memlimit, memswlimit, oldusage, curusage;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004401 int children = mem_cgroup_count_children(memcg);
4402 int ret = -EBUSY;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004403 int enlarge = 0;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004404
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004405	/* see mem_cgroup_resize_limit() */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07004406 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004407 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004408 while (retry_count) {
4409 if (signal_pending(current)) {
4410 ret = -EINTR;
4411 break;
4412 }
4413 /*
4414		 * Rather than hide it all in some function, I do this in an
4415		 * open-coded manner, so you can see what this really does.
Wanpeng Liaaad1532012-07-31 16:43:23 -07004416 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004417 */
4418 mutex_lock(&set_limit_mutex);
4419 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4420 if (memlimit > val) {
4421 ret = -EINVAL;
4422 mutex_unlock(&set_limit_mutex);
4423 break;
4424 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004425 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4426 if (memswlimit < val)
4427 enlarge = 1;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004428 ret = res_counter_set_limit(&memcg->memsw, val);
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07004429 if (!ret) {
4430 if (memlimit == val)
4431 memcg->memsw_is_minimum = true;
4432 else
4433 memcg->memsw_is_minimum = false;
4434 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004435 mutex_unlock(&set_limit_mutex);
4436
4437 if (!ret)
4438 break;
4439
Johannes Weiner56600482012-01-12 17:17:59 -08004440 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4441 MEM_CGROUP_RECLAIM_NOSWAP |
4442 MEM_CGROUP_RECLAIM_SHRINK);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004443 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004444 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004445 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004446 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07004447 else
4448 oldusage = curusage;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004449 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004450 if (!ret && enlarge)
4451 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004452 return ret;
4453}
4454
Andrew Morton0608f432013-09-24 15:27:41 -07004455unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4456 gfp_t gfp_mask,
4457 unsigned long *total_scanned)
4458{
4459 unsigned long nr_reclaimed = 0;
4460 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4461 unsigned long reclaimed;
4462 int loop = 0;
4463 struct mem_cgroup_tree_per_zone *mctz;
4464 unsigned long long excess;
4465 unsigned long nr_scanned;
4466
4467 if (order > 0)
4468 return 0;
4469
4470 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4471 /*
4472	 * This loop can run for a while, especially if mem_cgroups continuously
4473	 * keep exceeding their soft limit and putting the system under
4474	 * pressure.
4475 */
4476 do {
4477 if (next_mz)
4478 mz = next_mz;
4479 else
4480 mz = mem_cgroup_largest_soft_limit_node(mctz);
4481 if (!mz)
4482 break;
4483
4484 nr_scanned = 0;
4485 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4486 gfp_mask, &nr_scanned);
4487 nr_reclaimed += reclaimed;
4488 *total_scanned += nr_scanned;
4489 spin_lock(&mctz->lock);
4490
4491 /*
4492 * If we failed to reclaim anything from this memory cgroup
4493 * it is time to move on to the next cgroup
4494 */
4495 next_mz = NULL;
4496 if (!reclaimed) {
4497 do {
4498 /*
4499 * Loop until we find yet another one.
4500 *
4501 * By the time we get the soft_limit lock
4502				 * again, someone might have added the
4503				 * group back on the RB tree. Iterate to
4504				 * make sure we get a different memcg.
4505 * mem_cgroup_largest_soft_limit_node returns
4506 * NULL if no other cgroup is present on
4507 * the tree
4508 */
4509 next_mz =
4510 __mem_cgroup_largest_soft_limit_node(mctz);
4511 if (next_mz == mz)
4512 css_put(&next_mz->memcg->css);
4513 else /* next_mz == NULL or other memcg */
4514 break;
4515 } while (1);
4516 }
Johannes Weinercf2c8122014-06-06 14:38:21 -07004517 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07004518 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4519 /*
4520 * One school of thought says that we should not add
4521 * back the node to the tree if reclaim returns 0.
4522 * But our reclaim could return 0, simply because due
4523 * to priority we are exposing a smaller subset of
4524 * memory to reclaim from. Consider this as a longer
4525 * term TODO.
4526 */
4527 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07004528 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Andrew Morton0608f432013-09-24 15:27:41 -07004529 spin_unlock(&mctz->lock);
4530 css_put(&mz->memcg->css);
4531 loop++;
4532 /*
4533 * Could not reclaim anything and there are no more
4534 * mem cgroups to try or we seem to be looping without
4535 * reclaiming anything.
4536 */
4537 if (!nr_reclaimed &&
4538 (next_mz == NULL ||
4539 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4540 break;
4541 } while (!nr_reclaimed);
4542 if (next_mz)
4543 css_put(&next_mz->memcg->css);
4544 return nr_reclaimed;
4545}
4546
Michal Hocko2ef37d32012-10-26 13:37:30 +02004547/**
4548 * mem_cgroup_force_empty_list - clears LRU of a group
4549 * @memcg: group to clear
4550 * @node: NUMA node
4551 * @zid: zone id
4552 * @lru: lru to clear
4553 *
KAMEZAWA Hiroyuki3c935d12012-07-31 16:42:46 -07004554 * Traverse a specified page_cgroup list and try to drop them all. This doesn't
Michal Hocko2ef37d32012-10-26 13:37:30 +02004555 * reclaim the pages themselves - pages are moved to the parent (or root)
4556 * group.
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004557 */
Michal Hocko2ef37d32012-10-26 13:37:30 +02004558static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004559 int node, int zid, enum lru_list lru)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004560{
Hugh Dickinsbea8c152012-11-16 14:14:54 -08004561 struct lruvec *lruvec;
Michal Hocko2ef37d32012-10-26 13:37:30 +02004562 unsigned long flags;
KAMEZAWA Hiroyuki072c56c12008-02-07 00:14:39 -08004563 struct list_head *list;
Johannes Weiner925b7672012-01-12 17:18:15 -08004564 struct page *busy;
4565 struct zone *zone;
KAMEZAWA Hiroyuki072c56c12008-02-07 00:14:39 -08004566
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004567 zone = &NODE_DATA(node)->node_zones[zid];
Hugh Dickinsbea8c152012-11-16 14:14:54 -08004568 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4569 list = &lruvec->lists[lru];
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004570
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004571 busy = NULL;
Michal Hocko2ef37d32012-10-26 13:37:30 +02004572 do {
Johannes Weiner925b7672012-01-12 17:18:15 -08004573 struct page_cgroup *pc;
Johannes Weiner5564e882011-03-23 16:42:29 -07004574 struct page *page;
4575
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004576 spin_lock_irqsave(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004577 if (list_empty(list)) {
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004578 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07004579 break;
4580 }
Johannes Weiner925b7672012-01-12 17:18:15 -08004581 page = list_entry(list->prev, struct page, lru);
4582 if (busy == page) {
4583 list_move(&page->lru, list);
Thiago Farina648bcc72010-03-05 13:42:04 -08004584 busy = NULL;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004585 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004586 continue;
4587 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004588 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004589
Johannes Weiner925b7672012-01-12 17:18:15 -08004590 pc = lookup_page_cgroup(page);
Johannes Weiner5564e882011-03-23 16:42:29 -07004591
KAMEZAWA Hiroyuki3c935d12012-07-31 16:42:46 -07004592 if (mem_cgroup_move_parent(page, pc, memcg)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004593 /* found lock contention or "pc" is obsolete. */
Johannes Weiner925b7672012-01-12 17:18:15 -08004594 busy = page;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004595 } else
4596 busy = NULL;
Hugh Dickins2a7a0e02014-06-04 16:11:04 -07004597 cond_resched();
Michal Hocko2ef37d32012-10-26 13:37:30 +02004598 } while (!list_empty(list));
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004599}
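
/*
 * Note on the loop above (a summary, not new behaviour): when
 * mem_cgroup_move_parent() fails -- lock contention or an obsolete
 * page_cgroup -- the page is remembered in 'busy'; on the next pass it is
 * rotated to the head of the list, so the scan can make progress on other
 * pages before this one is retried.
 */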
4600
4601/*
Michal Hockoc26251f2012-10-26 13:37:28 +02004602 * Make the mem_cgroup's charge 0, when there are no tasks, by moving
4603 * all the charges and pages to the parent.
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004604 * This enables deleting this mem_cgroup.
Michal Hockoc26251f2012-10-26 13:37:28 +02004605 *
4606 * Caller is responsible for holding css reference on the memcg.
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004607 */
Michal Hockoab5196c2012-10-26 13:37:32 +02004608static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004609{
Michal Hockoc26251f2012-10-26 13:37:28 +02004610 int node, zid;
Glauber Costabea207c2012-12-18 14:22:11 -08004611 u64 usage;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08004612
Daisuke Nishimurafce66472010-01-15 17:01:30 -08004613 do {
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07004614 /* This makes sure all *used* pages are on an LRU. */
4615 lru_add_drain_all();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004616 drain_all_stock_sync(memcg);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004617 mem_cgroup_start_move(memcg);
Lai Jiangshan31aaea42012-12-12 13:51:27 -08004618 for_each_node_state(node, N_MEMORY) {
Michal Hocko2ef37d32012-10-26 13:37:30 +02004619 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
Hugh Dickinsf156ab932012-03-21 16:34:19 -07004620 enum lru_list lru;
4621 for_each_lru(lru) {
Michal Hocko2ef37d32012-10-26 13:37:30 +02004622 mem_cgroup_force_empty_list(memcg,
Hugh Dickinsf156ab932012-03-21 16:34:19 -07004623 node, zid, lru);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004624 }
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004625 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004626 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004627 mem_cgroup_end_move(memcg);
4628 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07004629 cond_resched();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004630
Michal Hocko2ef37d32012-10-26 13:37:30 +02004631 /*
Glauber Costabea207c2012-12-18 14:22:11 -08004632 * Kernel memory may not necessarily be trackable to a specific
4633 * process, so it is not migrated, and therefore we can't
4634 * expect its usage to drop to 0 here.
4635 * Having res filled up with kmem only is enough.
4636 *
Michal Hocko2ef37d32012-10-26 13:37:30 +02004637 * This is a safety check because mem_cgroup_force_empty_list
4638 * could have raced with mem_cgroup_replace_page_cache callers
4639 * so the lru seemed empty but the page could have been added
4640 * right after the check. RES_USAGE should be safe as we always
4641 * charge before adding to the LRU.
4642 */
Glauber Costabea207c2012-12-18 14:22:11 -08004643 usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4644 res_counter_read_u64(&memcg->kmem, RES_USAGE);
4645 } while (usage > 0);
Michal Hockoc26251f2012-10-26 13:37:28 +02004646}
4647
Tejun Heoea280e72014-05-16 13:22:48 -04004648/*
4649 * Test whether @memcg has children, dead or alive. Note that this
4650 * function doesn't care whether @memcg has use_hierarchy enabled and
4651 * returns %true if there are child csses according to the cgroup
4652 * hierarchy. Testing use_hierarchy is the caller's responsibility.
4653 */
Glauber Costab5f99b52013-02-22 16:34:53 -08004654static inline bool memcg_has_children(struct mem_cgroup *memcg)
4655{
Tejun Heoea280e72014-05-16 13:22:48 -04004656 bool ret;
4657
Johannes Weiner696ac172013-10-31 16:34:15 -07004658 /*
Tejun Heoea280e72014-05-16 13:22:48 -04004659 * The lock does not prevent addition or deletion of children, but
4660 * it prevents a new child from being initialized based on this
4661 * parent in css_online(), so it's enough to decide whether
4662 * hierarchically inherited attributes can still be changed or not.
Johannes Weiner696ac172013-10-31 16:34:15 -07004663 */
Tejun Heoea280e72014-05-16 13:22:48 -04004664 lockdep_assert_held(&memcg_create_mutex);
4665
4666 rcu_read_lock();
4667 ret = css_next_child(NULL, &memcg->css);
4668 rcu_read_unlock();
4669 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08004670}
4671
4672/*
Michal Hockoc26251f2012-10-26 13:37:28 +02004673 * Reclaims as many pages from the given memcg as possible and moves
4674 * the rest to the parent.
4675 *
4676 * Caller is responsible for holding css reference for memcg.
4677 */
4678static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4679{
4680 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02004681
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004682 /* we call try-to-free pages to make this cgroup empty */
4683 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004684 /* try to free all pages in this cgroup */
Glauber Costa569530f2012-04-12 12:49:13 -07004685 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004686 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004687
Michal Hockoc26251f2012-10-26 13:37:28 +02004688 if (signal_pending(current))
4689 return -EINTR;
4690
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004691 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
Johannes Weiner185efc02011-09-14 16:21:58 -07004692 false);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004693 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004694 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004695 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02004696 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004697 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08004698
4699 }
Michal Hockoab5196c2012-10-26 13:37:32 +02004700
4701 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08004702}
4703
Tejun Heo6770c642014-05-13 12:16:21 -04004704static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
4705 char *buf, size_t nbytes,
4706 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004707{
Tejun Heo6770c642014-05-13 12:16:21 -04004708 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02004709
Michal Hockod8423012012-10-26 13:37:29 +02004710 if (mem_cgroup_is_root(memcg))
4711 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04004712 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004713}
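
/*
 * Illustrative cgroup v1 usage of the handler above (mount point assumed,
 * not taken from this file):
 *
 *   echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * The written value is ignored; any write triggers mem_cgroup_force_empty(),
 * and writing to the root cgroup fails with -EINVAL.
 */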
4714
Tejun Heo182446d2013-08-08 20:11:24 -04004715static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4716 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08004717{
Tejun Heo182446d2013-08-08 20:11:24 -04004718 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004719}
4720
Tejun Heo182446d2013-08-08 20:11:24 -04004721static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4722 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08004723{
4724 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04004725 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04004726 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004727
Glauber Costa09998212013-02-22 16:34:55 -08004728 mutex_lock(&memcg_create_mutex);
Glauber Costa567fb432012-07-31 16:43:07 -07004729
4730 if (memcg->use_hierarchy == val)
4731 goto out;
4732
Balbir Singh18f59ea2009-01-07 18:08:07 -08004733 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004734 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08004735 * in the child subtrees. If it is unset, then the change can
4736 * occur, provided the current cgroup has no children.
4737 *
4738 * For the root cgroup, parent_memcg is NULL; we allow the value to be
4739 * set if there are no children.
4740 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004741 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08004742 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04004743 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004744 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004745 else
4746 retval = -EBUSY;
4747 } else
4748 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07004749
4750out:
Glauber Costa09998212013-02-22 16:34:55 -08004751 mutex_unlock(&memcg_create_mutex);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004752
4753 return retval;
4754}
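
/*
 * Illustrative usage of the knob above (path assumed):
 *
 *   echo 1 > /sys/fs/cgroup/memory/<group>/memory.use_hierarchy
 *
 * As the handler shows, the write is only accepted while the parent has
 * use_hierarchy unset and the group has no children (-EINVAL/-EBUSY
 * otherwise).
 */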
4755
Tejun Heo791badb2013-12-05 12:28:02 -05004756static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07004757 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004758{
Tejun Heo182446d2013-08-08 20:11:24 -04004759 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner05b84302014-08-06 16:05:59 -07004760 enum res_type type = MEMFILE_TYPE(cft->private);
4761 int name = MEMFILE_ATTR(cft->private);
Tejun Heoaf36f902012-04-01 12:09:55 -07004762
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004763 switch (type) {
4764 case _MEM:
Johannes Weiner05b84302014-08-06 16:05:59 -07004765 return res_counter_read_u64(&memcg->res, name);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004766 case _MEMSWAP:
Johannes Weiner05b84302014-08-06 16:05:59 -07004767 return res_counter_read_u64(&memcg->memsw, name);
Glauber Costa510fc4e2012-12-18 14:21:47 -08004768 case _KMEM:
Johannes Weiner05b84302014-08-06 16:05:59 -07004769 return res_counter_read_u64(&memcg->kmem, name);
Glauber Costa510fc4e2012-12-18 14:21:47 -08004770 break;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004771 default:
4772 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004773 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004774}
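
/*
 * cft->private packs a resource type and a res_counter member together;
 * MEMFILE_TYPE()/MEMFILE_ATTR() unpack them above. For example, a read of
 * memory.usage_in_bytes resolves to the _MEM/RES_USAGE pair and simply
 * returns res_counter_read_u64(&memcg->res, RES_USAGE).
 */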
Glauber Costa510fc4e2012-12-18 14:21:47 -08004775
Glauber Costa510fc4e2012-12-18 14:21:47 -08004776#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydovd6441632014-01-23 15:53:09 -08004777/* should be called with activate_kmem_mutex held */
4778static int __memcg_activate_kmem(struct mem_cgroup *memcg,
4779 unsigned long long limit)
4780{
4781 int err = 0;
4782 int memcg_id;
4783
4784 if (memcg_kmem_is_active(memcg))
4785 return 0;
4786
4787 /*
4788 * We are going to allocate memory for data shared by all memory
4789 * cgroups so let's stop accounting here.
4790 */
4791 memcg_stop_kmem_account();
4792
Glauber Costa510fc4e2012-12-18 14:21:47 -08004793 /*
4794 * For simplicity, we won't allow this to be disabled. It also can't
4795 * be changed if the cgroup has children already, or if tasks had
4796 * already joined.
4797 *
4798 * If tasks join before we set the limit, a person looking at
4799 * kmem.usage_in_bytes will have no way to determine when it took
4800 * place, which makes the value quite meaningless.
4801 *
4802 * After it first became limited, changes in the value of the limit are
4803 * of course permitted.
Glauber Costa510fc4e2012-12-18 14:21:47 -08004804 */
Glauber Costa09998212013-02-22 16:34:55 -08004805 mutex_lock(&memcg_create_mutex);
Tejun Heoea280e72014-05-16 13:22:48 -04004806 if (cgroup_has_tasks(memcg->css.cgroup) ||
4807 (memcg->use_hierarchy && memcg_has_children(memcg)))
Vladimir Davydovd6441632014-01-23 15:53:09 -08004808 err = -EBUSY;
Glauber Costa09998212013-02-22 16:34:55 -08004809 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004810 if (err)
4811 goto out;
4812
4813 memcg_id = ida_simple_get(&kmem_limited_groups,
4814 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
4815 if (memcg_id < 0) {
4816 err = memcg_id;
4817 goto out;
4818 }
4819
4820 /*
4821 * Make sure we have enough space for this cgroup in each root cache's
4822 * memcg_params.
4823 */
Vladimir Davydovbd673142014-06-04 16:07:40 -07004824 mutex_lock(&memcg_slab_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004825 err = memcg_update_all_caches(memcg_id + 1);
Vladimir Davydovbd673142014-06-04 16:07:40 -07004826 mutex_unlock(&memcg_slab_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004827 if (err)
4828 goto out_rmid;
4829
4830 memcg->kmemcg_id = memcg_id;
4831 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004832
4833 /*
4834 * We couldn't have accounted to this cgroup, because it hasn't got the
4835 * active bit set yet, so this should succeed.
4836 */
4837 err = res_counter_set_limit(&memcg->kmem, limit);
4838 VM_BUG_ON(err);
4839
4840 static_key_slow_inc(&memcg_kmem_enabled_key);
4841 /*
4842 * Setting the active bit after enabling static branching will
4843 * guarantee no one starts accounting before all call sites are
4844 * patched.
4845 */
4846 memcg_kmem_set_active(memcg);
4847out:
4848 memcg_resume_kmem_account();
4849 return err;
4850
4851out_rmid:
4852 ida_simple_remove(&kmem_limited_groups, memcg_id);
4853 goto out;
4854}
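
/*
 * Summary of the activation sequence above: reserve a kmemcg id, grow
 * every root cache's memcg_params array to make room for it, set the
 * kmem limit (which cannot fail while nothing has been accounted yet),
 * enable the static key, and only then set the active bit so nothing is
 * accounted before all call sites are patched.
 */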
4855
4856static int memcg_activate_kmem(struct mem_cgroup *memcg,
4857 unsigned long long limit)
4858{
4859 int ret;
4860
4861 mutex_lock(&activate_kmem_mutex);
4862 ret = __memcg_activate_kmem(memcg, limit);
4863 mutex_unlock(&activate_kmem_mutex);
Glauber Costa510fc4e2012-12-18 14:21:47 -08004864 return ret;
4865}
4866
Vladimir Davydovd6441632014-01-23 15:53:09 -08004867static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
4868 unsigned long long val)
4869{
4870 int ret;
4871
4872 if (!memcg_kmem_is_active(memcg))
4873 ret = memcg_activate_kmem(memcg, val);
4874 else
4875 ret = res_counter_set_limit(&memcg->kmem, val);
4876 return ret;
4877}
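
/*
 * Illustrative use of memcg_update_kmem_limit() through the
 * memory.kmem.limit_in_bytes file (path assumed): the first write
 * activates kmem accounting for the group, later writes only resize the
 * limit, e.g.
 *
 *   echo 64M > /sys/fs/cgroup/memory/<group>/memory.kmem.limit_in_bytes
 */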
4878
Glauber Costa55007d82012-12-18 14:22:38 -08004879static int memcg_propagate_kmem(struct mem_cgroup *memcg)
Glauber Costa510fc4e2012-12-18 14:21:47 -08004880{
Glauber Costa55007d82012-12-18 14:22:38 -08004881 int ret = 0;
Glauber Costa510fc4e2012-12-18 14:21:47 -08004882 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004883
Glauber Costa510fc4e2012-12-18 14:21:47 -08004884 if (!parent)
Vladimir Davydovd6441632014-01-23 15:53:09 -08004885 return 0;
Glauber Costa55007d82012-12-18 14:22:38 -08004886
Vladimir Davydovd6441632014-01-23 15:53:09 -08004887 mutex_lock(&activate_kmem_mutex);
Glauber Costaa8964b92012-12-18 14:22:09 -08004888 /*
Vladimir Davydovd6441632014-01-23 15:53:09 -08004889 * If the parent cgroup is not kmem-active now, it cannot be activated
4890 * after this point, because it has at least one child already.
Glauber Costaa8964b92012-12-18 14:22:09 -08004891 */
Vladimir Davydovd6441632014-01-23 15:53:09 -08004892 if (memcg_kmem_is_active(parent))
4893 ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
4894 mutex_unlock(&activate_kmem_mutex);
Glauber Costa55007d82012-12-18 14:22:38 -08004895 return ret;
Glauber Costa510fc4e2012-12-18 14:21:47 -08004896}
Vladimir Davydovd6441632014-01-23 15:53:09 -08004897#else
4898static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
4899 unsigned long long val)
4900{
4901 return -EINVAL;
4902}
Hugh Dickins6d0439902013-02-22 16:35:50 -08004903#endif /* CONFIG_MEMCG_KMEM */
Glauber Costa510fc4e2012-12-18 14:21:47 -08004904
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004905/*
4906 * The user of this function is...
4907 * RES_LIMIT.
4908 */
Tejun Heo451af502014-05-13 12:16:21 -04004909static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4910 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004911{
Tejun Heo451af502014-05-13 12:16:21 -04004912 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Glauber Costa86ae53e2012-12-18 14:21:45 -08004913 enum res_type type;
4914 int name;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004915 unsigned long long val;
4916 int ret;
4917
Tejun Heo451af502014-05-13 12:16:21 -04004918 buf = strstrip(buf);
4919 type = MEMFILE_TYPE(of_cft(of)->private);
4920 name = MEMFILE_ATTR(of_cft(of)->private);
Tejun Heoaf36f902012-04-01 12:09:55 -07004921
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004922 switch (name) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004923 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07004924 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4925 ret = -EINVAL;
4926 break;
4927 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004928 /* res_counter_memparse_write_strategy() does all the necessary parsing, so reuse it */
Tejun Heo451af502014-05-13 12:16:21 -04004929 ret = res_counter_memparse_write_strategy(buf, &val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004930 if (ret)
4931 break;
4932 if (type == _MEM)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004933 ret = mem_cgroup_resize_limit(memcg, val);
Glauber Costa510fc4e2012-12-18 14:21:47 -08004934 else if (type == _MEMSWAP)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004935 ret = mem_cgroup_resize_memsw_limit(memcg, val);
Glauber Costa510fc4e2012-12-18 14:21:47 -08004936 else if (type == _KMEM)
Vladimir Davydovd6441632014-01-23 15:53:09 -08004937 ret = memcg_update_kmem_limit(memcg, val);
Glauber Costa510fc4e2012-12-18 14:21:47 -08004938 else
4939 return -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004940 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07004941 case RES_SOFT_LIMIT:
Tejun Heo451af502014-05-13 12:16:21 -04004942 ret = res_counter_memparse_write_strategy(buf, &val);
Balbir Singh296c81d2009-09-23 15:56:36 -07004943 if (ret)
4944 break;
4945 /*
4946 * For memsw, soft limits are hard to implement in terms
4947 * of semantics; for now, we only support soft limits for
4948 * memory control without swap.
4949 */
4950 if (type == _MEM)
4951 ret = res_counter_set_soft_limit(&memcg->res, val);
4952 else
4953 ret = -EINVAL;
4954 break;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004955 default:
4956 ret = -EINVAL; /* should be BUG() ? */
4957 break;
4958 }
Tejun Heo451af502014-05-13 12:16:21 -04004959 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004960}
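
/*
 * Illustrative writes handled above (cgroup v1 file names; paths assumed):
 *
 *   echo 512M > .../memory.limit_in_bytes         # RES_LIMIT on _MEM
 *   echo 1G   > .../memory.memsw.limit_in_bytes   # RES_LIMIT on _MEMSWAP
 *   echo 256M > .../memory.soft_limit_in_bytes    # RES_SOFT_LIMIT on _MEM
 *
 * res_counter_memparse_write_strategy() understands the usual memparse
 * suffixes (K/M/G).
 */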
4961
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004962static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4963 unsigned long long *mem_limit, unsigned long long *memsw_limit)
4964{
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004965 unsigned long long min_limit, min_memsw_limit, tmp;
4966
4967 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4968 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004969 if (!memcg->use_hierarchy)
4970 goto out;
4971
Tejun Heo5c9d5352014-05-16 13:22:48 -04004972 while (memcg->css.parent) {
4973 memcg = mem_cgroup_from_css(memcg->css.parent);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004974 if (!memcg->use_hierarchy)
4975 break;
4976 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4977 min_limit = min(min_limit, tmp);
4978 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4979 min_memsw_limit = min(min_memsw_limit, tmp);
4980 }
4981out:
4982 *mem_limit = min_limit;
4983 *memsw_limit = min_memsw_limit;
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004984}
4985
Tejun Heo6770c642014-05-13 12:16:21 -04004986static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4987 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004988{
Tejun Heo6770c642014-05-13 12:16:21 -04004989 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Glauber Costa86ae53e2012-12-18 14:21:45 -08004990 int name;
4991 enum res_type type;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004992
Tejun Heo6770c642014-05-13 12:16:21 -04004993 type = MEMFILE_TYPE(of_cft(of)->private);
4994 name = MEMFILE_ATTR(of_cft(of)->private);
Tejun Heoaf36f902012-04-01 12:09:55 -07004995
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004996 switch (name) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004997 case RES_MAX_USAGE:
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004998 if (type == _MEM)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004999 res_counter_reset_max(&memcg->res);
Glauber Costa510fc4e2012-12-18 14:21:47 -08005000 else if (type == _MEMSWAP)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005001 res_counter_reset_max(&memcg->memsw);
Glauber Costa510fc4e2012-12-18 14:21:47 -08005002 else if (type == _KMEM)
5003 res_counter_reset_max(&memcg->kmem);
5004 else
5005 return -EINVAL;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07005006 break;
5007 case RES_FAILCNT:
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005008 if (type == _MEM)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005009 res_counter_reset_failcnt(&memcg->res);
Glauber Costa510fc4e2012-12-18 14:21:47 -08005010 else if (type == _MEMSWAP)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005011 res_counter_reset_failcnt(&memcg->memsw);
Glauber Costa510fc4e2012-12-18 14:21:47 -08005012 else if (type == _KMEM)
5013 res_counter_reset_failcnt(&memcg->kmem);
5014 else
5015 return -EINVAL;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07005016 break;
5017 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07005018
Tejun Heo6770c642014-05-13 12:16:21 -04005019 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07005020}
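
/*
 * Illustrative usage of the reset handler above (paths assumed): the
 * written value is ignored, the write itself resets the counter, e.g.
 *
 *   echo 0 > .../memory.max_usage_in_bytes   # reset the RES_MAX_USAGE watermark
 *   echo 0 > .../memory.failcnt              # reset RES_FAILCNT
 */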
5021
Tejun Heo182446d2013-08-08 20:11:24 -04005022static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005023 struct cftype *cft)
5024{
Tejun Heo182446d2013-08-08 20:11:24 -04005025 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005026}
5027
Daisuke Nishimura02491442010-03-10 15:22:17 -08005028#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04005029static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005030 struct cftype *cft, u64 val)
5031{
Tejun Heo182446d2013-08-08 20:11:24 -04005032 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005033
5034 if (val >= (1 << NR_MOVE_TYPE))
5035 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005036
Glauber Costaee5e8472013-02-22 16:34:50 -08005037 /*
5038 * No locking is needed here, because ->can_attach() will
5039 * check this value once at the beginning of the process, and then carry
5040 * on with stale data. This means that changes to this value will only
5041 * affect task migrations starting after the change.
5042 */
5043 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005044 return 0;
5045}
Daisuke Nishimura02491442010-03-10 15:22:17 -08005046#else
Tejun Heo182446d2013-08-08 20:11:24 -04005047static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08005048 struct cftype *cft, u64 val)
5049{
5050 return -ENOSYS;
5051}
5052#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005053
Ying Han406eb0c2011-05-26 16:25:37 -07005054#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05005055static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07005056{
Greg Thelen25485de2013-11-12 15:07:40 -08005057 struct numa_stat {
5058 const char *name;
5059 unsigned int lru_mask;
5060 };
5061
5062 static const struct numa_stat stats[] = {
5063 { "total", LRU_ALL },
5064 { "file", LRU_ALL_FILE },
5065 { "anon", LRU_ALL_ANON },
5066 { "unevictable", BIT(LRU_UNEVICTABLE) },
5067 };
5068 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07005069 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08005070 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05005071 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07005072
Greg Thelen25485de2013-11-12 15:07:40 -08005073 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5074 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5075 seq_printf(m, "%s=%lu", stat->name, nr);
5076 for_each_node_state(nid, N_MEMORY) {
5077 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5078 stat->lru_mask);
5079 seq_printf(m, " N%d=%lu", nid, nr);
5080 }
5081 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07005082 }
Ying Han406eb0c2011-05-26 16:25:37 -07005083
Ying Han071aee12013-11-12 15:07:41 -08005084 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5085 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07005086
Ying Han071aee12013-11-12 15:07:41 -08005087 nr = 0;
5088 for_each_mem_cgroup_tree(iter, memcg)
5089 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5090 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5091 for_each_node_state(nid, N_MEMORY) {
5092 nr = 0;
5093 for_each_mem_cgroup_tree(iter, memcg)
5094 nr += mem_cgroup_node_nr_lru_pages(
5095 iter, nid, stat->lru_mask);
5096 seq_printf(m, " N%d=%lu", nid, nr);
5097 }
5098 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07005099 }
Ying Han406eb0c2011-05-26 16:25:37 -07005100
Ying Han406eb0c2011-05-26 16:25:37 -07005101 return 0;
5102}
5103#endif /* CONFIG_NUMA */
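
/*
 * Sketch of the memory.numa_stat output produced above on a two-node
 * machine (values are illustrative page counts):
 *
 *   total=4096 N0=2048 N1=2048
 *   file=1024 N0=512 N1=512
 *   anon=3072 N0=1536 N1=1536
 *   unevictable=0 N0=0 N1=0
 *   hierarchical_total=4096 N0=2048 N1=2048
 *   ...
 */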
5104
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005105static inline void mem_cgroup_lru_names_not_uptodate(void)
5106{
5107 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5108}
5109
Tejun Heo2da8ca82013-12-05 12:28:04 -05005110static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005111{
Tejun Heo2da8ca82013-12-05 12:28:04 -05005112 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005113 struct mem_cgroup *mi;
5114 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005115
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005116 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07005117 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07005118 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005119 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5120 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07005121 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005122
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005123 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5124 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5125 mem_cgroup_read_events(memcg, i));
5126
5127 for (i = 0; i < NR_LRU_LISTS; i++)
5128 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5129 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5130
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07005131 /* Hierarchical information */
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08005132 {
5133 unsigned long long limit, memsw_limit;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07005134 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07005135 seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08005136 if (do_swap_account)
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07005137 seq_printf(m, "hierarchical_memsw_limit %llu\n",
5138 memsw_limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08005139 }
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005140
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005141 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5142 long long val = 0;
5143
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07005144 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07005145 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07005146 for_each_mem_cgroup_tree(mi, memcg)
5147 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5148 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5149 }
5150
5151 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5152 unsigned long long val = 0;
5153
5154 for_each_mem_cgroup_tree(mi, memcg)
5155 val += mem_cgroup_read_events(mi, i);
5156 seq_printf(m, "total_%s %llu\n",
5157 mem_cgroup_events_names[i], val);
5158 }
5159
5160 for (i = 0; i < NR_LRU_LISTS; i++) {
5161 unsigned long long val = 0;
5162
5163 for_each_mem_cgroup_tree(mi, memcg)
5164 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5165 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07005166 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07005167
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005168#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005169 {
5170 int nid, zid;
5171 struct mem_cgroup_per_zone *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07005172 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005173 unsigned long recent_rotated[2] = {0, 0};
5174 unsigned long recent_scanned[2] = {0, 0};
5175
5176 for_each_online_node(nid)
5177 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
Jianyu Zhane2318752014-06-06 14:38:20 -07005178 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
Hugh Dickins89abfab2012-05-29 15:06:53 -07005179 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005180
Hugh Dickins89abfab2012-05-29 15:06:53 -07005181 recent_rotated[0] += rstat->recent_rotated[0];
5182 recent_rotated[1] += rstat->recent_rotated[1];
5183 recent_scanned[0] += rstat->recent_scanned[0];
5184 recent_scanned[1] += rstat->recent_scanned[1];
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005185 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07005186 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5187 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5188 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5189 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08005190 }
5191#endif
5192
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005193 return 0;
5194}
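
/*
 * memory.stat, assembled above, emits one "<name> <value>" line per local
 * stat/event/LRU counter, then the hierarchical limits and the "total_*"
 * sums over the subtree, roughly like this (names come from the *_names
 * arrays; values illustrative):
 *
 *   cache 1234880
 *   rss 2097152
 *   ...
 *   hierarchical_memory_limit 536870912
 *   total_cache 1234880
 *   total_rss 2097152
 *   ...
 */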
5195
Tejun Heo182446d2013-08-08 20:11:24 -04005196static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5197 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005198{
Tejun Heo182446d2013-08-08 20:11:24 -04005199 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005200
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07005201 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005202}
5203
Tejun Heo182446d2013-08-08 20:11:24 -04005204static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5205 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005206{
Tejun Heo182446d2013-08-08 20:11:24 -04005207 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08005208
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07005209 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005210 return -EINVAL;
5211
Linus Torvalds14208b02014-06-09 15:03:33 -07005212 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07005213 memcg->swappiness = val;
5214 else
5215 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08005216
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005217 return 0;
5218}
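
/*
 * Illustrative usage (path assumed): per-group swappiness takes 0-100,
 * like the global vm.swappiness sysctl:
 *
 *   echo 10 > /sys/fs/cgroup/memory/<group>/memory.swappiness
 *
 * As the handler above shows, writing the root group's file updates
 * vm_swappiness itself.
 */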
5219
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005220static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5221{
5222 struct mem_cgroup_threshold_ary *t;
5223 u64 usage;
5224 int i;
5225
5226 rcu_read_lock();
5227 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005228 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005229 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005230 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005231
5232 if (!t)
5233 goto unlock;
5234
Johannes Weiner05b84302014-08-06 16:05:59 -07005235 if (!swap)
5236 usage = res_counter_read_u64(&memcg->res, RES_USAGE);
5237 else
5238 usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005239
5240 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07005241 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005242 * If it's not true, a threshold was crossed after last
5243 * call of __mem_cgroup_threshold().
5244 */
Phil Carmody5407a562010-05-26 14:42:42 -07005245 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005246
5247 /*
5248 * Iterate backward over array of thresholds starting from
5249 * current_threshold and check if a threshold is crossed.
5250 * If none of the thresholds below usage is crossed, we read
5251 * only one element of the array here.
5252 */
5253 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5254 eventfd_signal(t->entries[i].eventfd, 1);
5255
5256 /* i = current_threshold + 1 */
5257 i++;
5258
5259 /*
5260 * Iterate forward over array of thresholds starting from
5261 * current_threshold+1 and check if a threshold is crossed.
5262 * If none of the thresholds above usage is crossed, we read
5263 * only one element of the array here.
5264 */
5265 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5266 eventfd_signal(t->entries[i].eventfd, 1);
5267
5268 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07005269 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005270unlock:
5271 rcu_read_unlock();
5272}
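
/*
 * Worked example for the scan above (numbers illustrative): with sorted
 * thresholds {4M, 8M, 16M} and current_threshold at the 8M entry, a new
 * usage of 18M signals only the 16M eventfd on the forward pass and
 * leaves current_threshold at 16M; a new usage of 3M signals 8M and 4M
 * on the backward pass and leaves current_threshold at -1 (below all
 * entries).
 */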
5273
5274static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5275{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07005276 while (memcg) {
5277 __mem_cgroup_threshold(memcg, false);
5278 if (do_swap_account)
5279 __mem_cgroup_threshold(memcg, true);
5280
5281 memcg = parent_mem_cgroup(memcg);
5282 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005283}
5284
5285static int compare_thresholds(const void *a, const void *b)
5286{
5287 const struct mem_cgroup_threshold *_a = a;
5288 const struct mem_cgroup_threshold *_b = b;
5289
Greg Thelen2bff24a2013-09-11 14:23:08 -07005290 if (_a->threshold > _b->threshold)
5291 return 1;
5292
5293 if (_a->threshold < _b->threshold)
5294 return -1;
5295
5296 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005297}
5298
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005299static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005300{
5301 struct mem_cgroup_eventfd_list *ev;
5302
Michal Hocko2bcf2e92014-07-30 16:08:33 -07005303 spin_lock(&memcg_oom_lock);
5304
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005305 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005306 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07005307
5308 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005309 return 0;
5310}
5311
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005312static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005313{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07005314 struct mem_cgroup *iter;
5315
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005316 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07005317 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005318}
5319
Tejun Heo59b6f872013-11-22 18:20:43 -05005320static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005321 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005322{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005323 struct mem_cgroup_thresholds *thresholds;
5324 struct mem_cgroup_threshold_ary *new;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005325 u64 threshold, usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005326 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005327
5328 ret = res_counter_memparse_write_strategy(args, &threshold);
5329 if (ret)
5330 return ret;
5331
5332 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005333
Johannes Weiner05b84302014-08-06 16:05:59 -07005334 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005335 thresholds = &memcg->thresholds;
Johannes Weiner05b84302014-08-06 16:05:59 -07005336 usage = res_counter_read_u64(&memcg->res, RES_USAGE);
5337 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005338 thresholds = &memcg->memsw_thresholds;
Johannes Weiner05b84302014-08-06 16:05:59 -07005339 usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
5340 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005341 BUG();
5342
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005343 /* Check if a threshold crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005344 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005345 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5346
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005347 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005348
5349 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005350 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005351 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005352 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005353 ret = -ENOMEM;
5354 goto unlock;
5355 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005356 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005357
5358 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005359 if (thresholds->primary) {
5360 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005361 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005362 }
5363
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005364 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005365 new->entries[size - 1].eventfd = eventfd;
5366 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005367
5368 /* Sort thresholds. Registering of new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005369 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005370 compare_thresholds, NULL);
5371
5372 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005373 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005374 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07005375 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005376 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005377 * new->current_threshold will not be used until
5378 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005379 * it here.
5380 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005381 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07005382 } else
5383 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005384 }
5385
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005386 /* Free old spare buffer and save old primary buffer as spare */
5387 kfree(thresholds->spare);
5388 thresholds->spare = thresholds->primary;
5389
5390 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005391
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07005392 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005393 synchronize_rcu();
5394
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005395unlock:
5396 mutex_unlock(&memcg->thresholds_lock);
5397
5398 return ret;
5399}
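
/*
 * Note on the update scheme above: the live primary array is never
 * modified in place. A new array with one more slot is built and sorted,
 * published with rcu_assign_pointer(), and the old primary is kept as
 * ->spare so that unregistration can reuse it without allocating.
 */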
5400
Tejun Heo59b6f872013-11-22 18:20:43 -05005401static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005402 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005403{
Tejun Heo59b6f872013-11-22 18:20:43 -05005404 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05005405}
5406
Tejun Heo59b6f872013-11-22 18:20:43 -05005407static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005408 struct eventfd_ctx *eventfd, const char *args)
5409{
Tejun Heo59b6f872013-11-22 18:20:43 -05005410 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05005411}
5412
Tejun Heo59b6f872013-11-22 18:20:43 -05005413static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005414 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005415{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005416 struct mem_cgroup_thresholds *thresholds;
5417 struct mem_cgroup_threshold_ary *new;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005418 u64 usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005419 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005420
5421 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07005422
5423 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005424 thresholds = &memcg->thresholds;
Johannes Weiner05b84302014-08-06 16:05:59 -07005425 usage = res_counter_read_u64(&memcg->res, RES_USAGE);
5426 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005427 thresholds = &memcg->memsw_thresholds;
Johannes Weiner05b84302014-08-06 16:05:59 -07005428 usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
5429 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005430 BUG();
5431
Anton Vorontsov371528c2012-02-24 05:14:46 +04005432 if (!thresholds->primary)
5433 goto unlock;
5434
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005435 /* Check if a threshold crossed before removing */
5436 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5437
5438 /* Calculate new number of threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005439 size = 0;
5440 for (i = 0; i < thresholds->primary->size; i++) {
5441 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005442 size++;
5443 }
5444
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005445 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07005446
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005447 /* Set thresholds array to NULL if we don't have thresholds */
5448 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005449 kfree(new);
5450 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07005451 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005452 }
5453
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005454 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005455
5456 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005457 new->current_threshold = -1;
5458 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5459 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005460 continue;
5461
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005462 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07005463 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005464 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005465 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005466 * until rcu_assign_pointer(), so it's safe to increment
5467 * it here.
5468 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005469 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005470 }
5471 j++;
5472 }
5473
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07005474swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005475 /* Swap primary and spare array */
5476 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07005477 /* If all events are unregistered, free the spare array */
5478 if (!new) {
5479 kfree(thresholds->spare);
5480 thresholds->spare = NULL;
5481 }
5482
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07005483 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005484
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07005485 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005486 synchronize_rcu();
Anton Vorontsov371528c2012-02-24 05:14:46 +04005487unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005488 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08005489}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08005490
Tejun Heo59b6f872013-11-22 18:20:43 -05005491static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005492 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005493{
Tejun Heo59b6f872013-11-22 18:20:43 -05005494 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05005495}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005496
Tejun Heo59b6f872013-11-22 18:20:43 -05005497static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005498 struct eventfd_ctx *eventfd)
5499{
Tejun Heo59b6f872013-11-22 18:20:43 -05005500 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05005501}
5502
Tejun Heo59b6f872013-11-22 18:20:43 -05005503static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005504 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005505{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005506 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005507
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005508 event = kmalloc(sizeof(*event), GFP_KERNEL);
5509 if (!event)
5510 return -ENOMEM;
5511
Michal Hocko1af8efe2011-07-26 16:08:24 -07005512 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005513
5514 event->eventfd = eventfd;
5515 list_add(&event->list, &memcg->oom_notify);
5516
5517 /* already in OOM ? */
Michal Hocko79dfdac2011-07-26 16:08:23 -07005518 if (atomic_read(&memcg->under_oom))
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005519 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07005520 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005521
5522 return 0;
5523}
5524
Tejun Heo59b6f872013-11-22 18:20:43 -05005525static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05005526 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005527{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005528 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005529
Michal Hocko1af8efe2011-07-26 16:08:24 -07005530 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005531
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005532 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005533 if (ev->eventfd == eventfd) {
5534 list_del(&ev->list);
5535 kfree(ev);
5536 }
5537 }
5538
Michal Hocko1af8efe2011-07-26 16:08:24 -07005539 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005540}
5541
Tejun Heo2da8ca82013-12-05 12:28:04 -05005542static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005543{
Tejun Heo2da8ca82013-12-05 12:28:04 -05005544 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005545
Tejun Heo791badb2013-12-05 12:28:02 -05005546 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5547 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005548 return 0;
5549}
5550
Tejun Heo182446d2013-08-08 20:11:24 -04005551static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005552 struct cftype *cft, u64 val)
5553{
Tejun Heo182446d2013-08-08 20:11:24 -04005554 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005555
5556 /* cannot set to root cgroup and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07005557 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005558 return -EINVAL;
5559
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005560 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07005561 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005562 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07005563
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005564 return 0;
5565}
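
/*
 * Illustrative usage (path assumed): reading memory.oom_control reports
 * the "oom_kill_disable" and "under_oom" values printed above, and
 *
 *   echo 1 > /sys/fs/cgroup/memory/<group>/memory.oom_control
 *
 * disables the OOM killer for the group; only 0 and 1 are accepted and
 * the root cgroup is rejected, as the write handler shows.
 */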
5566
Andrew Mortonc255a452012-07-31 16:43:02 -07005567#ifdef CONFIG_MEMCG_KMEM
Glauber Costacbe128e32012-04-09 19:36:34 -03005568static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00005569{
Glauber Costa55007d82012-12-18 14:22:38 -08005570 int ret;
5571
Glauber Costa2633d7a2012-12-18 14:22:34 -08005572 memcg->kmemcg_id = -1;
Glauber Costa55007d82012-12-18 14:22:38 -08005573 ret = memcg_propagate_kmem(memcg);
5574 if (ret)
5575 return ret;
Glauber Costa2633d7a2012-12-18 14:22:34 -08005576
Glauber Costa1d62e432012-04-09 19:36:33 -03005577 return mem_cgroup_sockets_init(memcg, ss);
Michel Lespinasse573b4002013-04-29 15:08:13 -07005578}
Glauber Costae5671df2011-12-11 21:47:01 +00005579
Li Zefan10d5ebf2013-07-08 16:00:33 -07005580static void memcg_destroy_kmem(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +00005581{
Glauber Costa1d62e432012-04-09 19:36:33 -03005582 mem_cgroup_sockets_destroy(memcg);
Li Zefan10d5ebf2013-07-08 16:00:33 -07005583}
5584
5585static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5586{
5587 if (!memcg_kmem_is_active(memcg))
5588 return;
5589
5590 /*
5591 * kmem charges can outlive the cgroup. In the case of slab
5592 * pages, for instance, a page may contain objects from various
5593 * processes. As we refrain from taking a reference for every
5594 * such allocation, we have to be careful when doing uncharge
5595 * (see memcg_uncharge_kmem) and here during offlining.
5596 *
5597 * The idea is that only the _last_ uncharge which sees
5598 * the dead memcg will drop the last reference. An additional
5599 * reference is taken here before the group is marked dead
5600 * which is then paired with a css_put either during uncharge or here.
5601 *
5602 * Although this might sound strange as this path is called from
Tejun Heoec903c02014-05-13 12:11:01 -04005603 * css_offline() when the reference might have dropped down to 0 and
5604 * shouldn't be incremented anymore (css_tryget_online() would
5605 * fail) we do not have other options because of the kmem
5606 * allocations lifetime.
Li Zefan10d5ebf2013-07-08 16:00:33 -07005607 */
5608 css_get(&memcg->css);
Glauber Costa7de37682012-12-18 14:22:07 -08005609
5610 memcg_kmem_mark_dead(memcg);
5611
5612 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5613 return;
5614
Glauber Costa7de37682012-12-18 14:22:07 -08005615 if (memcg_kmem_test_and_clear_dead(memcg))
Li Zefan10d5ebf2013-07-08 16:00:33 -07005616 css_put(&memcg->css);
Glauber Costad1a4c0b2011-12-11 21:47:04 +00005617}
Glauber Costae5671df2011-12-11 21:47:01 +00005618#else
Glauber Costacbe128e32012-04-09 19:36:34 -03005619static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00005620{
5621 return 0;
5622}
Glauber Costad1a4c0b2011-12-11 21:47:04 +00005623
Li Zefan10d5ebf2013-07-08 16:00:33 -07005624static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5625{
5626}
5627
5628static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +00005629{
5630}
Glauber Costae5671df2011-12-11 21:47:01 +00005631#endif
5632
Tejun Heo79bd9812013-11-22 18:20:42 -05005633/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05005634 * DO NOT USE IN NEW FILES.
5635 *
5636 * "cgroup.event_control" implementation.
5637 *
5638 * This is way over-engineered. It tries to support fully configurable
5639 * events for each user. Such a level of flexibility is completely
5640 * unnecessary, especially in light of the planned unified hierarchy.
5641 *
5642 * Please deprecate this and replace with something simpler if at all
5643 * possible.
5644 */
5645
5646/*
Tejun Heo79bd9812013-11-22 18:20:42 -05005647 * Unregister event and free resources.
5648 *
5649 * Gets called from workqueue.
5650 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05005651static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05005652{
Tejun Heo3bc942f2013-11-22 18:20:44 -05005653 struct mem_cgroup_event *event =
5654 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05005655 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05005656
5657 remove_wait_queue(event->wqh, &event->wait);
5658
Tejun Heo59b6f872013-11-22 18:20:43 -05005659 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05005660
5661 /* Notify userspace the event is going away. */
5662 eventfd_signal(event->eventfd, 1);
5663
5664 eventfd_ctx_put(event->eventfd);
5665 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05005666 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05005667}
5668
5669/*
5670 * Gets called on POLLHUP on eventfd when user closes it.
5671 *
5672 * Called with wqh->lock held and interrupts disabled.
5673 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05005674static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5675 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05005676{
Tejun Heo3bc942f2013-11-22 18:20:44 -05005677 struct mem_cgroup_event *event =
5678 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05005679 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05005680 unsigned long flags = (unsigned long)key;
5681
5682 if (flags & POLLHUP) {
5683 /*
5684 * If the event has been detached at cgroup removal, we
5685 * can simply return knowing the other side will clean up
5686 * for us.
5687 *
5688 * We can't race against event freeing since the other
5689 * side will require wqh->lock via remove_wait_queue(),
5690 * which we hold.
5691 */
Tejun Heofba94802013-11-22 18:20:43 -05005692 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05005693 if (!list_empty(&event->list)) {
5694 list_del_init(&event->list);
5695 /*
5696 * We are in atomic context, but cgroup_event_remove()
5697 * We are in atomic context, but memcg_event_remove()
5698 * may sleep, so we have to call it from a workqueue.
5699 schedule_work(&event->remove);
5700 }
Tejun Heofba94802013-11-22 18:20:43 -05005701 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05005702 }
5703
5704 return 0;
5705}
5706
Tejun Heo3bc942f2013-11-22 18:20:44 -05005707static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05005708 wait_queue_head_t *wqh, poll_table *pt)
5709{
Tejun Heo3bc942f2013-11-22 18:20:44 -05005710 struct mem_cgroup_event *event =
5711 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05005712
5713 event->wqh = wqh;
5714 add_wait_queue(wqh, &event->wait);
5715}
5716
5717/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05005718 * DO NOT USE IN NEW FILES.
5719 *
Tejun Heo79bd9812013-11-22 18:20:42 -05005720 * Parse input and register new cgroup event handler.
5721 *
5722 * Input must be in format '<event_fd> <control_fd> <args>'.
5723 * Interpretation of args is defined by control file implementation.
5724 */
Tejun Heo451af502014-05-13 12:16:21 -04005725static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5726 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05005727{
Tejun Heo451af502014-05-13 12:16:21 -04005728 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05005729 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05005730 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05005731 struct cgroup_subsys_state *cfile_css;
5732 unsigned int efd, cfd;
5733 struct fd efile;
5734 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05005735 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05005736 char *endp;
5737 int ret;
5738
Tejun Heo451af502014-05-13 12:16:21 -04005739 buf = strstrip(buf);
5740
5741 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05005742 if (*endp != ' ')
5743 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04005744 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05005745
Tejun Heo451af502014-05-13 12:16:21 -04005746 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05005747 if ((*endp != ' ') && (*endp != '\0'))
5748 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04005749 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05005750
5751 event = kzalloc(sizeof(*event), GFP_KERNEL);
5752 if (!event)
5753 return -ENOMEM;
5754
Tejun Heo59b6f872013-11-22 18:20:43 -05005755 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05005756 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05005757 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5758 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5759 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05005760
5761 efile = fdget(efd);
5762 if (!efile.file) {
5763 ret = -EBADF;
5764 goto out_kfree;
5765 }
5766
5767 event->eventfd = eventfd_ctx_fileget(efile.file);
5768 if (IS_ERR(event->eventfd)) {
5769 ret = PTR_ERR(event->eventfd);
5770 goto out_put_efile;
5771 }
5772
5773 cfile = fdget(cfd);
5774 if (!cfile.file) {
5775 ret = -EBADF;
5776 goto out_put_eventfd;
5777 }
5778
 5779	/* the process needs read permission on the control file */
5780 /* AV: shouldn't we check that it's been opened for read instead? */
5781 ret = inode_permission(file_inode(cfile.file), MAY_READ);
5782 if (ret < 0)
5783 goto out_put_cfile;
5784
Tejun Heo79bd9812013-11-22 18:20:42 -05005785 /*
Tejun Heofba94802013-11-22 18:20:43 -05005786 * Determine the event callbacks and set them in @event. This used
5787 * to be done via struct cftype but cgroup core no longer knows
5788 * about these events. The following is crude but the whole thing
5789 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05005790 *
5791 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05005792 */
5793 name = cfile.file->f_dentry->d_name.name;
5794
5795 if (!strcmp(name, "memory.usage_in_bytes")) {
5796 event->register_event = mem_cgroup_usage_register_event;
5797 event->unregister_event = mem_cgroup_usage_unregister_event;
5798 } else if (!strcmp(name, "memory.oom_control")) {
5799 event->register_event = mem_cgroup_oom_register_event;
5800 event->unregister_event = mem_cgroup_oom_unregister_event;
5801 } else if (!strcmp(name, "memory.pressure_level")) {
5802 event->register_event = vmpressure_register_event;
5803 event->unregister_event = vmpressure_unregister_event;
5804 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05005805 event->register_event = memsw_cgroup_usage_register_event;
5806 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05005807 } else {
5808 ret = -EINVAL;
5809 goto out_put_cfile;
5810 }
5811
5812 /*
Tejun Heob5557c42013-11-22 18:20:42 -05005813	 * Verify that @cfile belongs to @css.  Also, remaining events are
5814 * automatically removed on cgroup destruction but the removal is
5815 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05005816 */
Tejun Heoec903c02014-05-13 12:11:01 -04005817 cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
5818 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05005819 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05005820 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05005821 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05005822 if (cfile_css != css) {
5823 css_put(cfile_css);
5824 goto out_put_cfile;
5825 }
Tejun Heo79bd9812013-11-22 18:20:42 -05005826
Tejun Heo451af502014-05-13 12:16:21 -04005827 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05005828 if (ret)
5829 goto out_put_css;
5830
5831 efile.file->f_op->poll(efile.file, &event->pt);
5832
Tejun Heofba94802013-11-22 18:20:43 -05005833 spin_lock(&memcg->event_list_lock);
5834 list_add(&event->list, &memcg->event_list);
5835 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05005836
5837 fdput(cfile);
5838 fdput(efile);
5839
Tejun Heo451af502014-05-13 12:16:21 -04005840 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05005841
5842out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05005843 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05005844out_put_cfile:
5845 fdput(cfile);
5846out_put_eventfd:
5847 eventfd_ctx_put(event->eventfd);
5848out_put_efile:
5849 fdput(efile);
5850out_kfree:
5851 kfree(event);
5852
5853 return ret;
5854}
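
/*
 * Illustrative userspace sketch (not kernel code): one way a monitor might
 * drive the interface parsed above.  The cgroup mount point, group name and
 * 64M threshold are assumptions for the example; the
 * "<event_fd> <control_fd> <args>" format and the per-file handlers are the
 * ones wired up in memcg_write_event_control().  Needs <sys/eventfd.h>,
 * <fcntl.h>, <stdio.h>, <stdint.h> and <unistd.h>.
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *			O_WRONLY);
 *	char buf[64];
 *	uint64_t cnt;
 *	int len = snprintf(buf, sizeof(buf), "%d %d %llu",
 *			   efd, cfd, 64ULL << 20);
 *
 *	write(ecfd, buf, len);
 *	read(efd, &cnt, sizeof(cnt));
 *
 * The write registers a 64M usage threshold through
 * mem_cgroup_usage_register_event(); the read then blocks until the
 * threshold is crossed and the eventfd is signalled.
 */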
5855
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005856static struct cftype mem_cgroup_files[] = {
5857 {
Balbir Singh0eea1032008-02-07 00:13:57 -08005858 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005859 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05005860 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005861 },
5862 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07005863 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005864 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04005865 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005866 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07005867 },
5868 {
Balbir Singh0eea1032008-02-07 00:13:57 -08005869 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005870 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005871 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005872 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005873 },
5874 {
Balbir Singh296c81d2009-09-23 15:56:36 -07005875 .name = "soft_limit_in_bytes",
5876 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005877 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005878 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07005879 },
5880 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005881 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005882 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04005883 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005884 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005885 },
Balbir Singh8697d332008-02-07 00:13:59 -08005886 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005887 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005888 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08005889 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08005890 {
5891 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04005892 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08005893 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08005894 {
5895 .name = "use_hierarchy",
5896 .write_u64 = mem_cgroup_hierarchy_write,
5897 .read_u64 = mem_cgroup_hierarchy_read,
5898 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005899 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05005900 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04005901 .write = memcg_write_event_control,
Tejun Heo79bd9812013-11-22 18:20:42 -05005902 .flags = CFTYPE_NO_PREFIX,
5903 .mode = S_IWUGO,
5904 },
5905 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005906 .name = "swappiness",
5907 .read_u64 = mem_cgroup_swappiness_read,
5908 .write_u64 = mem_cgroup_swappiness_write,
5909 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005910 {
5911 .name = "move_charge_at_immigrate",
5912 .read_u64 = mem_cgroup_move_charge_read,
5913 .write_u64 = mem_cgroup_move_charge_write,
5914 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005915 {
5916 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005917 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07005918 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07005919 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5920 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07005921 {
5922 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07005923 },
Ying Han406eb0c2011-05-26 16:25:37 -07005924#ifdef CONFIG_NUMA
5925 {
5926 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005927 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07005928 },
5929#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08005930#ifdef CONFIG_MEMCG_KMEM
5931 {
5932 .name = "kmem.limit_in_bytes",
5933 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005934 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005935 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005936 },
5937 {
5938 .name = "kmem.usage_in_bytes",
5939 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05005940 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005941 },
5942 {
5943 .name = "kmem.failcnt",
5944 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04005945 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005946 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005947 },
5948 {
5949 .name = "kmem.max_usage_in_bytes",
5950 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04005951 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005952 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08005953 },
Glauber Costa749c5412012-12-18 14:23:01 -08005954#ifdef CONFIG_SLABINFO
5955 {
5956 .name = "kmem.slabinfo",
Tejun Heo2da8ca82013-12-05 12:28:04 -05005957 .seq_show = mem_cgroup_slabinfo_read,
Glauber Costa749c5412012-12-18 14:23:01 -08005958 },
5959#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08005960#endif
Tejun Heo6bc10342012-04-01 12:09:55 -07005961 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07005962};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08005963
Michal Hocko2d110852013-02-22 16:34:43 -08005964#ifdef CONFIG_MEMCG_SWAP
5965static struct cftype memsw_cgroup_files[] = {
5966 {
5967 .name = "memsw.usage_in_bytes",
5968 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05005969 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08005970 },
5971 {
5972 .name = "memsw.max_usage_in_bytes",
5973 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04005974 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005975 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08005976 },
5977 {
5978 .name = "memsw.limit_in_bytes",
5979 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04005980 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05005981 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08005982 },
5983 {
5984 .name = "memsw.failcnt",
5985 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04005986 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05005987 .read_u64 = mem_cgroup_read_u64,
Michal Hocko2d110852013-02-22 16:34:43 -08005988 },
5989 { }, /* terminate */
5990};
5991#endif
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005992static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005993{
5994 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08005995 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07005996 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08005997 /*
5998 * This routine is called against possible nodes.
 5999	 * But it's a BUG to call kmalloc() against an offline node.
 6000	 *
 6001	 * TODO: this routine can waste a lot of memory for nodes which will
 6002	 * never be onlined.  It would be better to use a memory hotplug
 6003	 * callback function.
6004 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07006005 if (!node_state(node, N_NORMAL_MEMORY))
6006 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08006007 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08006008 if (!pn)
6009 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08006010
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08006011 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6012 mz = &pn->zoneinfo[zone];
Hugh Dickinsbea8c152012-11-16 14:14:54 -08006013 lruvec_init(&mz->lruvec);
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07006014 mz->usage_in_excess = 0;
6015 mz->on_tree = false;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07006016 mz->memcg = memcg;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08006017 }
Johannes Weiner54f72fe2013-07-08 15:59:49 -07006018 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08006019 return 0;
6020}
6021
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006022static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08006023{
Johannes Weiner54f72fe2013-07-08 15:59:49 -07006024 kfree(memcg->nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08006025}
6026
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07006027static struct mem_cgroup *mem_cgroup_alloc(void)
6028{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07006029 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08006030 size_t size;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07006031
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08006032 size = sizeof(struct mem_cgroup);
6033 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07006034
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08006035 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07006036 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07006037 return NULL;
6038
Hugh Dickinsd79154b2012-03-21 16:34:18 -07006039 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6040 if (!memcg->stat)
Dan Carpenterd2e61b82010-11-11 14:05:12 -08006041 goto out_free;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07006042 spin_lock_init(&memcg->pcp_counter_lock);
6043 return memcg;
Dan Carpenterd2e61b82010-11-11 14:05:12 -08006044
6045out_free:
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08006046 kfree(memcg);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08006047 return NULL;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07006048}
6049
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08006050/*
Glauber Costac8b2a362012-12-18 14:22:13 -08006051 * When destroying a mem_cgroup, references from swap_cgroup can remain.
 6052 * (Scanning everything at force_empty is too costly...)
 6053 *
 6054 * Instead of clearing all references at force_empty, we remember
 6055 * the number of references from swap_cgroup and free the mem_cgroup when
6056 * it goes down to 0.
6057 *
6058 * Removal of cgroup itself succeeds regardless of refs from swap.
Hugh Dickins59927fb2012-03-15 15:17:07 -07006059 */
Glauber Costac8b2a362012-12-18 14:22:13 -08006060
6061static void __mem_cgroup_free(struct mem_cgroup *memcg)
Hugh Dickins59927fb2012-03-15 15:17:07 -07006062{
Glauber Costac8b2a362012-12-18 14:22:13 -08006063 int node;
Hugh Dickins59927fb2012-03-15 15:17:07 -07006064
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07006065 mem_cgroup_remove_from_trees(memcg);
Glauber Costac8b2a362012-12-18 14:22:13 -08006066
6067 for_each_node(node)
6068 free_mem_cgroup_per_zone_info(memcg, node);
6069
6070 free_percpu(memcg->stat);
6071
Glauber Costa3f134612012-05-29 15:07:11 -07006072 /*
 6073	 * We need to make sure that (at least for now) the jump label
6074 * destruction code runs outside of the cgroup lock. This is because
6075 * get_online_cpus(), which is called from the static_branch update,
6076 * can't be called inside the cgroup_lock. cpusets are the ones
6077 * enforcing this dependency, so if they ever change, we might as well.
6078 *
6079 * schedule_work() will guarantee this happens. Be careful if you need
6080 * to move this code around, and make sure it is outside
6081 * the cgroup_lock.
6082 */
Glauber Costaa8964b92012-12-18 14:22:09 -08006083 disarm_static_keys(memcg);
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08006084 kfree(memcg);
Hugh Dickins59927fb2012-03-15 15:17:07 -07006085}
Glauber Costa3afe36b2012-05-29 15:07:10 -07006086
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08006087/*
 6088 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
6089 */
Glauber Costae1aab162011-12-11 21:47:03 +00006090struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08006091{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006092 if (!memcg->res.parent)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08006093 return NULL;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006094 return mem_cgroup_from_res_counter(memcg->res.parent, res);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08006095}
Glauber Costae1aab162011-12-11 21:47:03 +00006096EXPORT_SYMBOL(parent_mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07006097
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07006098static void __init mem_cgroup_soft_limit_tree_init(void)
6099{
6100 struct mem_cgroup_tree_per_node *rtpn;
6101 struct mem_cgroup_tree_per_zone *rtpz;
6102 int tmp, node, zone;
6103
6104 for_each_node(node) {
6105 tmp = node;
6106 if (!node_state(node, N_NORMAL_MEMORY))
6107 tmp = -1;
6108 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6109 BUG_ON(!rtpn);
6110
6111 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6112
6113 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6114 rtpz = &rtpn->rb_tree_per_zone[zone];
6115 rtpz->rb_root = RB_ROOT;
6116 spin_lock_init(&rtpz->lock);
6117 }
6118 }
6119}
6120
Li Zefan0eb253e2009-01-15 13:51:25 -08006121static struct cgroup_subsys_state * __ref
Tejun Heoeb954192013-08-08 20:11:23 -04006122mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08006123{
Glauber Costad142e3e2013-02-22 16:34:52 -08006124 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07006125 long error = -ENOMEM;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08006126 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08006127
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006128 memcg = mem_cgroup_alloc();
6129 if (!memcg)
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07006130 return ERR_PTR(error);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08006131
Bob Liu3ed28fa2012-01-12 17:19:04 -08006132 for_each_node(node)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006133 if (alloc_mem_cgroup_per_zone_info(memcg, node))
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08006134 goto free_out;
Balbir Singhf64c3f52009-09-23 15:56:37 -07006135
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08006136 /* root ? */
Tejun Heoeb954192013-08-08 20:11:23 -04006137 if (parent_css == NULL) {
Hillf Dantona41c58a2011-12-19 17:11:57 -08006138 root_mem_cgroup = memcg;
Glauber Costad142e3e2013-02-22 16:34:52 -08006139 res_counter_init(&memcg->res, NULL);
6140 res_counter_init(&memcg->memsw, NULL);
6141 res_counter_init(&memcg->kmem, NULL);
Balbir Singh18f59ea2009-01-07 18:08:07 -08006142 }
Balbir Singh28dbc4b2009-01-07 18:08:05 -08006143
Glauber Costad142e3e2013-02-22 16:34:52 -08006144 memcg->last_scanned_node = MAX_NUMNODES;
6145 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08006146 memcg->move_charge_at_immigrate = 0;
6147 mutex_init(&memcg->thresholds_lock);
6148 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07006149 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05006150 INIT_LIST_HEAD(&memcg->event_list);
6151 spin_lock_init(&memcg->event_list_lock);
Glauber Costad142e3e2013-02-22 16:34:52 -08006152
6153 return &memcg->css;
6154
6155free_out:
6156 __mem_cgroup_free(memcg);
6157 return ERR_PTR(error);
6158}
6159
6160static int
Tejun Heoeb954192013-08-08 20:11:23 -04006161mem_cgroup_css_online(struct cgroup_subsys_state *css)
Glauber Costad142e3e2013-02-22 16:34:52 -08006162{
Tejun Heoeb954192013-08-08 20:11:23 -04006163 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04006164 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
Glauber Costad142e3e2013-02-22 16:34:52 -08006165
Tejun Heo15a4c832014-05-04 15:09:14 -04006166 if (css->id > MEM_CGROUP_ID_MAX)
Li Zefan4219b2d2013-09-23 16:56:29 +08006167 return -ENOSPC;
6168
Tejun Heo63876982013-08-08 20:11:23 -04006169 if (!parent)
Glauber Costad142e3e2013-02-22 16:34:52 -08006170 return 0;
6171
Glauber Costa09998212013-02-22 16:34:55 -08006172 mutex_lock(&memcg_create_mutex);
Glauber Costad142e3e2013-02-22 16:34:52 -08006173
6174 memcg->use_hierarchy = parent->use_hierarchy;
6175 memcg->oom_kill_disable = parent->oom_kill_disable;
6176 memcg->swappiness = mem_cgroup_swappiness(parent);
6177
6178 if (parent->use_hierarchy) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006179 res_counter_init(&memcg->res, &parent->res);
6180 res_counter_init(&memcg->memsw, &parent->memsw);
Glauber Costa510fc4e2012-12-18 14:21:47 -08006181 res_counter_init(&memcg->kmem, &parent->kmem);
Glauber Costa55007d82012-12-18 14:22:38 -08006182
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08006183 /*
Li Zefan8d76a972013-07-08 16:00:36 -07006184 * No need to take a reference to the parent because cgroup
6185 * core guarantees its existence.
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08006186 */
Balbir Singh18f59ea2009-01-07 18:08:07 -08006187 } else {
Johannes Weiner05b84302014-08-06 16:05:59 -07006188 res_counter_init(&memcg->res, &root_mem_cgroup->res);
6189 res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
6190 res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07006191 /*
 6192		 * A deeper hierarchy with use_hierarchy == false doesn't make
 6193		 * much sense, so let the cgroup subsystem know about this
6194 * unfortunate state in our controller.
6195 */
Glauber Costad142e3e2013-02-22 16:34:52 -08006196 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05006197 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08006198 }
Glauber Costa09998212013-02-22 16:34:55 -08006199 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08006200
Tejun Heo073219e2014-02-08 10:36:58 -05006201 return memcg_init_kmem(memcg, &memory_cgrp_subsys);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08006202}
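
/*
 * A note on the res_counter parenting above: with use_hierarchy enabled,
 * the child's res/memsw/kmem counters are parented to the parent's, so a
 * charge against the child propagates up and is limited by every ancestor.
 * With use_hierarchy disabled they are parented directly to
 * root_mem_cgroup's counters, so intermediate ancestors are bypassed.
 */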
6203
Michal Hocko5f578162013-04-29 15:07:17 -07006204/*
6205 * Announce all parents that a group from their hierarchy is gone.
6206 */
6207static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6208{
6209 struct mem_cgroup *parent = memcg;
6210
6211 while ((parent = parent_mem_cgroup(parent)))
Johannes Weiner519ebea2013-07-03 15:04:51 -07006212 mem_cgroup_iter_invalidate(parent);
Michal Hocko5f578162013-04-29 15:07:17 -07006213
6214 /*
 6215	 * If the root memcg is not hierarchical we have to check it
 6216	 * explicitly.
6217 */
6218 if (!root_mem_cgroup->use_hierarchy)
Johannes Weiner519ebea2013-07-03 15:04:51 -07006219 mem_cgroup_iter_invalidate(root_mem_cgroup);
Michal Hocko5f578162013-04-29 15:07:17 -07006220}
6221
Tejun Heoeb954192013-08-08 20:11:23 -04006222static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08006223{
Tejun Heoeb954192013-08-08 20:11:23 -04006224 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05006225 struct mem_cgroup_event *event, *tmp;
Filipe Brandenburger4fb1a862014-03-03 15:38:25 -08006226 struct cgroup_subsys_state *iter;
Tejun Heo79bd9812013-11-22 18:20:42 -05006227
6228 /*
6229 * Unregister events and notify userspace.
 6230	 * Notify userspace about cgroup removal only after rmdir of the cgroup
 6231	 * directory to avoid a race between userspace and kernelspace.
6232 */
Tejun Heofba94802013-11-22 18:20:43 -05006233 spin_lock(&memcg->event_list_lock);
6234 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05006235 list_del_init(&event->list);
6236 schedule_work(&event->remove);
6237 }
Tejun Heofba94802013-11-22 18:20:43 -05006238 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07006239
Li Zefan10d5ebf2013-07-08 16:00:33 -07006240 kmem_cgroup_css_offline(memcg);
6241
Michal Hocko5f578162013-04-29 15:07:17 -07006242 mem_cgroup_invalidate_reclaim_iterators(memcg);
Filipe Brandenburger4fb1a862014-03-03 15:38:25 -08006243
6244 /*
6245 * This requires that offlining is serialized. Right now that is
6246 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6247 */
6248 css_for_each_descendant_post(iter, css)
6249 mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6250
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07006251 memcg_unregister_all_caches(memcg);
Michal Hocko33cb8762013-07-31 13:53:51 -07006252 vmpressure_cleanup(&memcg->vmpressure);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08006253}
6254
Tejun Heoeb954192013-08-08 20:11:23 -04006255static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08006256{
Tejun Heoeb954192013-08-08 20:11:23 -04006257 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner96f1c582013-12-12 17:12:34 -08006258 /*
6259 * XXX: css_offline() would be where we should reparent all
6260 * memory to prepare the cgroup for destruction. However,
Tejun Heoec903c02014-05-13 12:11:01 -04006261 * memcg does not do css_tryget_online() and res_counter charging
Johannes Weiner96f1c582013-12-12 17:12:34 -08006262 * under the same RCU lock region, which means that charging
6263 * could race with offlining. Offlining only happens to
6264 * cgroups with no tasks in them but charges can show up
6265 * without any tasks from the swapin path when the target
6266 * memcg is looked up from the swapout record and not from the
6267 * current task as it usually is. A race like this can leak
6268 * charges and put pages with stale cgroup pointers into
6269 * circulation:
6270 *
6271 * #0 #1
6272 * lookup_swap_cgroup_id()
6273 * rcu_read_lock()
6274 * mem_cgroup_lookup()
Tejun Heoec903c02014-05-13 12:11:01 -04006275 * css_tryget_online()
Johannes Weiner96f1c582013-12-12 17:12:34 -08006276 * rcu_read_unlock()
Tejun Heoec903c02014-05-13 12:11:01 -04006277 * disable css_tryget_online()
Johannes Weiner96f1c582013-12-12 17:12:34 -08006278 * call_rcu()
6279 * offline_css()
6280 * reparent_charges()
6281 * res_counter_charge()
6282 * css_put()
6283 * css_free()
6284 * pc->mem_cgroup = dead memcg
6285 * add page to lru
6286 *
6287 * The bulk of the charges are still moved in offline_css() to
6288 * avoid pinning a lot of pages in case a long-term reference
6289 * like a swapout record is deferring the css_free() to long
6290 * after offlining. But this makes sure we catch any charges
6291 * made after offlining:
6292 */
6293 mem_cgroup_reparent_charges(memcg);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08006294
Li Zefan10d5ebf2013-07-08 16:00:33 -07006295 memcg_destroy_kmem(memcg);
Li Zefan465939a2013-07-08 16:00:38 -07006296 __mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08006297}
6298
Tejun Heo1ced9532014-07-08 18:02:57 -04006299/**
6300 * mem_cgroup_css_reset - reset the states of a mem_cgroup
6301 * @css: the target css
6302 *
6303 * Reset the states of the mem_cgroup associated with @css. This is
6304 * invoked when the userland requests disabling on the default hierarchy
6305 * but the memcg is pinned through dependency. The memcg should stop
6306 * applying policies and should revert to the vanilla state as it may be
6307 * made visible again.
6308 *
6309 * The current implementation only resets the essential configurations.
6310 * This needs to be expanded to cover all the visible parts.
6311 */
6312static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
6313{
6314 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6315
6316 mem_cgroup_resize_limit(memcg, ULLONG_MAX);
6317 mem_cgroup_resize_memsw_limit(memcg, ULLONG_MAX);
6318 memcg_update_kmem_limit(memcg, ULLONG_MAX);
6319 res_counter_set_soft_limit(&memcg->res, ULLONG_MAX);
6320}
6321
Daisuke Nishimura02491442010-03-10 15:22:17 -08006322#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006323/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006324static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006325{
Johannes Weiner05b84302014-08-06 16:05:59 -07006326 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07006327
6328 /* Try a single bulk charge without reclaim first */
6329 ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
6330 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006331 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006332 return ret;
6333 }
Johannes Weiner692e7c42014-08-06 16:05:57 -07006334 if (ret == -EINTR) {
6335 __mem_cgroup_cancel_charge(root_mem_cgroup, count);
6336 return ret;
6337 }
Johannes Weiner9476db92014-08-06 16:05:55 -07006338
6339 /* Try charges one by one with reclaim */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006340 while (count--) {
Johannes Weiner9476db92014-08-06 16:05:55 -07006341 ret = mem_cgroup_try_charge(mc.to,
Michal Hocko0029e192014-08-06 16:05:53 -07006342 GFP_KERNEL & ~__GFP_NORETRY, 1);
Johannes Weiner9476db92014-08-06 16:05:55 -07006343 /*
6344 * In case of failure, any residual charges against
6345 * mc.to will be dropped by mem_cgroup_clear_mc()
Johannes Weiner692e7c42014-08-06 16:05:57 -07006346 * later on. However, cancel any charges that are
6347 * bypassed to root right away or they'll be lost.
Johannes Weiner9476db92014-08-06 16:05:55 -07006348 */
Johannes Weiner692e7c42014-08-06 16:05:57 -07006349 if (ret == -EINTR)
6350 __mem_cgroup_cancel_charge(root_mem_cgroup, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08006351 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08006352 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006353 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07006354 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006355 }
Johannes Weiner9476db92014-08-06 16:05:55 -07006356 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006357}
6358
6359/**
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006360 * get_mctgt_type - get target type of moving charge
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006361 * @vma: the vma the pte to be checked belongs
6362 * @addr: the address corresponding to the pte to be checked
6363 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08006364 * @target: the pointer the target page or swap ent will be stored(can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006365 *
6366 * Returns
6367 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
6368 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 6369 *   move charge. If @target is not NULL, the page is stored in target->page
 6370 *   with an extra refcount taken (callers should handle it).
 6371 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 6372 *   target for charge migration. If @target is not NULL, the entry is stored
 6373 *   in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006374 *
6375 * Called with pte lock held.
6376 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006377union mc_target {
6378 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006379 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006380};
6381
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006382enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006383 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006384 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08006385 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006386};
6387
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006388static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6389 unsigned long addr, pte_t ptent)
6390{
6391 struct page *page = vm_normal_page(vma, addr, ptent);
6392
6393 if (!page || !page_mapped(page))
6394 return NULL;
6395 if (PageAnon(page)) {
6396 /* we don't move shared anon */
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07006397 if (!move_anon())
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006398 return NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07006399 } else if (!move_file())
6400 /* we ignore mapcount for file pages */
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006401 return NULL;
6402 if (!get_page_unless_zero(page))
6403 return NULL;
6404
6405 return page;
6406}
6407
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07006408#ifdef CONFIG_SWAP
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006409static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6410 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6411{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006412 struct page *page = NULL;
6413 swp_entry_t ent = pte_to_swp_entry(ptent);
6414
6415 if (!move_anon() || non_swap_entry(ent))
6416 return NULL;
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07006417 /*
 6418	 * Because lookup_swap_cache() updates some statistics counters,
6419 * we call find_get_page() with swapper_space directly.
6420 */
Shaohua Li33806f02013-02-22 16:34:37 -08006421 page = find_get_page(swap_address_space(ent), ent.val);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006422 if (do_swap_account)
6423 entry->val = ent.val;
6424
6425 return page;
6426}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07006427#else
6428static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6429 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6430{
6431 return NULL;
6432}
6433#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006434
Daisuke Nishimura87946a72010-05-26 14:42:39 -07006435static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6436 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6437{
6438 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07006439 struct address_space *mapping;
6440 pgoff_t pgoff;
6441
6442 if (!vma->vm_file) /* anonymous vma */
6443 return NULL;
6444 if (!move_file())
6445 return NULL;
6446
Daisuke Nishimura87946a72010-05-26 14:42:39 -07006447 mapping = vma->vm_file->f_mapping;
6448 if (pte_none(ptent))
6449 pgoff = linear_page_index(vma, addr);
6450 else /* pte_file(ptent) is true */
6451 pgoff = pte_to_pgoff(ptent);
6452
 6453	/* The page is moved even if it's not RSS of this task (page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07006454#ifdef CONFIG_SWAP
6455 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07006456 if (shmem_mapping(mapping)) {
6457 page = find_get_entry(mapping, pgoff);
6458 if (radix_tree_exceptional_entry(page)) {
6459 swp_entry_t swp = radix_to_swp_entry(page);
6460 if (do_swap_account)
6461 *entry = swp;
6462 page = find_get_page(swap_address_space(swp), swp.val);
6463 }
6464 } else
6465 page = find_get_page(mapping, pgoff);
6466#else
6467 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07006468#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07006469 return page;
6470}
6471
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006472static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006473 unsigned long addr, pte_t ptent, union mc_target *target)
6474{
Daisuke Nishimura02491442010-03-10 15:22:17 -08006475 struct page *page = NULL;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006476 struct page_cgroup *pc;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006477 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006478 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006479
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006480 if (pte_present(ptent))
6481 page = mc_handle_present_pte(vma, addr, ptent);
6482 else if (is_swap_pte(ptent))
6483 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07006484 else if (pte_none(ptent) || pte_file(ptent))
6485 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006486
6487 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006488 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006489 if (page) {
6490 pc = lookup_page_cgroup(page);
6491 /*
6492 * Do only loose check w/o page_cgroup lock.
6493 * mem_cgroup_move_account() checks the pc is valid or not under
6494 * the lock.
6495 */
6496 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6497 ret = MC_TARGET_PAGE;
6498 if (target)
6499 target->page = page;
6500 }
6501 if (!ret || !target)
6502 put_page(page);
6503 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07006504 /* There is a swap entry and a page doesn't exist or isn't charged */
6505 if (ent.val && !ret &&
Li Zefan34c00c32013-09-23 16:56:01 +08006506 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07006507 ret = MC_TARGET_SWAP;
6508 if (target)
6509 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006510 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006511 return ret;
6512}
6513
Naoya Horiguchi12724852012-03-21 16:34:28 -07006514#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6515/*
6516 * We don't consider swapping or file mapped pages because THP does not
6517 * support them for now.
6518 * Caller should make sure that pmd_trans_huge(pmd) is true.
6519 */
6520static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6521 unsigned long addr, pmd_t pmd, union mc_target *target)
6522{
6523 struct page *page = NULL;
6524 struct page_cgroup *pc;
6525 enum mc_target_type ret = MC_TARGET_NONE;
6526
6527 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08006528 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Naoya Horiguchi12724852012-03-21 16:34:28 -07006529 if (!move_anon())
6530 return ret;
6531 pc = lookup_page_cgroup(page);
6532 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6533 ret = MC_TARGET_PAGE;
6534 if (target) {
6535 get_page(page);
6536 target->page = page;
6537 }
6538 }
6539 return ret;
6540}
6541#else
6542static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6543 unsigned long addr, pmd_t pmd, union mc_target *target)
6544{
6545 return MC_TARGET_NONE;
6546}
6547#endif
6548
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006549static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6550 unsigned long addr, unsigned long end,
6551 struct mm_walk *walk)
6552{
6553 struct vm_area_struct *vma = walk->private;
6554 pte_t *pte;
6555 spinlock_t *ptl;
6556
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006557 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07006558 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6559 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006560 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07006561 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07006562 }
Dave Hansen03319322011-03-22 16:32:56 -07006563
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07006564 if (pmd_trans_unstable(pmd))
6565 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006566 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6567 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006568 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006569 mc.precharge++; /* increment precharge temporarily */
6570 pte_unmap_unlock(pte - 1, ptl);
6571 cond_resched();
6572
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006573 return 0;
6574}
6575
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006576static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6577{
6578 unsigned long precharge;
6579 struct vm_area_struct *vma;
6580
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006581 down_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006582 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6583 struct mm_walk mem_cgroup_count_precharge_walk = {
6584 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6585 .mm = mm,
6586 .private = vma,
6587 };
6588 if (is_vm_hugetlb_page(vma))
6589 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006590 walk_page_range(vma->vm_start, vma->vm_end,
6591 &mem_cgroup_count_precharge_walk);
6592 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006593 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006594
6595 precharge = mc.precharge;
6596 mc.precharge = 0;
6597
6598 return precharge;
6599}
6600
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006601static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6602{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006603 unsigned long precharge = mem_cgroup_count_precharge(mm);
6604
6605 VM_BUG_ON(mc.moving_task);
6606 mc.moving_task = current;
6607 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006608}
6609
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006610/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6611static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006612{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07006613 struct mem_cgroup *from = mc.from;
6614 struct mem_cgroup *to = mc.to;
Li Zefan40503772013-07-08 16:00:34 -07006615 int i;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07006616
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006617 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006618 if (mc.precharge) {
6619 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
6620 mc.precharge = 0;
6621 }
6622 /*
6623 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6624 * we must uncharge here.
6625 */
6626 if (mc.moved_charge) {
6627 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6628 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006629 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006630 /* we must fixup refcnts and charges */
6631 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006632 /* uncharge swap account from the old cgroup */
Johannes Weiner05b84302014-08-06 16:05:59 -07006633 res_counter_uncharge(&mc.from->memsw,
6634 PAGE_SIZE * mc.moved_swap);
Li Zefan40503772013-07-08 16:00:34 -07006635
6636 for (i = 0; i < mc.moved_swap; i++)
6637 css_put(&mc.from->css);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006638
Johannes Weiner05b84302014-08-06 16:05:59 -07006639 /*
6640 * we charged both to->res and to->memsw, so we should
6641 * uncharge to->res.
6642 */
6643 res_counter_uncharge(&mc.to->res,
6644 PAGE_SIZE * mc.moved_swap);
Li Zefan40503772013-07-08 16:00:34 -07006645 /* we've already done css_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006646 mc.moved_swap = 0;
6647 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006648 memcg_oom_recover(from);
6649 memcg_oom_recover(to);
6650 wake_up_all(&mc.waitq);
6651}
6652
6653static void mem_cgroup_clear_mc(void)
6654{
6655 struct mem_cgroup *from = mc.from;
6656
6657 /*
6658 * we must clear moving_task before waking up waiters at the end of
6659 * task migration.
6660 */
6661 mc.moving_task = NULL;
6662 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07006663 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006664 mc.from = NULL;
6665 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07006666 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07006667 mem_cgroup_end_move(from);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006668}
6669
Tejun Heoeb954192013-08-08 20:11:23 -04006670static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08006671 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006672{
Tejun Heo2f7ee562011-12-12 18:12:21 -08006673 struct task_struct *p = cgroup_taskset_first(tset);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006674 int ret = 0;
Tejun Heoeb954192013-08-08 20:11:23 -04006675 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Glauber Costaee5e8472013-02-22 16:34:50 -08006676 unsigned long move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006677
Glauber Costaee5e8472013-02-22 16:34:50 -08006678 /*
 6679	 * We are now committed to this value, whatever it is. Changes in this
 6680	 * tunable will only affect upcoming migrations, not the current one.
 6681	 * So we need to save it and keep using it.
6682 */
6683 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
6684 if (move_charge_at_immigrate) {
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006685 struct mm_struct *mm;
6686 struct mem_cgroup *from = mem_cgroup_from_task(p);
6687
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006688 VM_BUG_ON(from == memcg);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006689
6690 mm = get_task_mm(p);
6691 if (!mm)
6692 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006693 /* We move charges only when we move a owner of the mm */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006694 if (mm->owner == p) {
6695 VM_BUG_ON(mc.from);
6696 VM_BUG_ON(mc.to);
6697 VM_BUG_ON(mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006698 VM_BUG_ON(mc.moved_charge);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006699 VM_BUG_ON(mc.moved_swap);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07006700 mem_cgroup_start_move(from);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07006701 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006702 mc.from = from;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07006703 mc.to = memcg;
Glauber Costaee5e8472013-02-22 16:34:50 -08006704 mc.immigrate_flags = move_charge_at_immigrate;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07006705 spin_unlock(&mc.lock);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006706 /* We set mc.moving_task later */
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006707
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006708 ret = mem_cgroup_precharge_mc(mm);
6709 if (ret)
6710 mem_cgroup_clear_mc();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006711 }
6712 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006713 }
6714 return ret;
6715}
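
/*
 * Usage note: charge moving is opted into on the destination group through
 * its memory.move_charge_at_immigrate file before the task is migrated; in
 * the value written, bit 0 is understood to cover anonymous pages (what
 * move_anon() tests) and bit 1 file-backed pages (what move_file() tests).
 * Only when the saved move_charge_at_immigrate is non-zero does
 * can_attach() above set up mc.from/mc.to and precharge the destination.
 */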
6716
Tejun Heoeb954192013-08-08 20:11:23 -04006717static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08006718 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006719{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006720 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006721}
6722
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006723static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6724 unsigned long addr, unsigned long end,
6725 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006726{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006727 int ret = 0;
6728 struct vm_area_struct *vma = walk->private;
6729 pte_t *pte;
6730 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07006731 enum mc_target_type target_type;
6732 union mc_target target;
6733 struct page *page;
6734 struct page_cgroup *pc;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006735
Naoya Horiguchi12724852012-03-21 16:34:28 -07006736 /*
 6737	 * We don't take compound_lock() here, but no race with thp splitting
 6738	 * can happen because:
 6739	 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
 6740	 *   being split, which means there's no concurrent thp split,
6741 * - if another thread runs into split_huge_page() just after we
6742 * entered this if-block, the thread must wait for page table lock
6743 * to be unlocked in __split_huge_page_splitting(), where the main
6744 * part of thp split is not executed yet.
6745 */
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006746 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Hugh Dickins62ade862012-05-18 11:28:34 -07006747 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006748 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07006749 return 0;
6750 }
6751 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6752 if (target_type == MC_TARGET_PAGE) {
6753 page = target.page;
6754 if (!isolate_lru_page(page)) {
6755 pc = lookup_page_cgroup(page);
6756 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07006757 pc, mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07006758 mc.precharge -= HPAGE_PMD_NR;
6759 mc.moved_charge += HPAGE_PMD_NR;
6760 }
6761 putback_lru_page(page);
6762 }
6763 put_page(page);
6764 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08006765 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07006766 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07006767 }
6768
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07006769 if (pmd_trans_unstable(pmd))
6770 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006771retry:
6772 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6773 for (; addr != end; addr += PAGE_SIZE) {
6774 pte_t ptent = *(pte++);
Daisuke Nishimura02491442010-03-10 15:22:17 -08006775 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006776
6777 if (!mc.precharge)
6778 break;
6779
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006780 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006781 case MC_TARGET_PAGE:
6782 page = target.page;
6783 if (isolate_lru_page(page))
6784 goto put;
6785 pc = lookup_page_cgroup(page);
Johannes Weiner7ec99d62011-03-23 16:42:36 -07006786 if (!mem_cgroup_move_account(page, 1, pc,
KAMEZAWA Hiroyuki2f3479b2012-05-29 15:07:04 -07006787 mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006788 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006789 /* we uncharge from mc.from later. */
6790 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006791 }
6792 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07006793put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006794 put_page(page);
6795 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08006796 case MC_TARGET_SWAP:
6797 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07006798 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08006799 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08006800 /* we fixup refcnts and charges later. */
6801 mc.moved_swap++;
6802 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08006803 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006804 default:
6805 break;
6806 }
6807 }
6808 pte_unmap_unlock(pte - 1, ptl);
6809 cond_resched();
6810
6811 if (addr != end) {
6812 /*
6813 * We have consumed all precharges we got in can_attach().
 6814		 * We try to charge one by one, but don't do any additional
 6815		 * charges to mc.to if we have already failed to charge once in
 6816		 * the attach() phase.
6817 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08006818 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006819 if (!ret)
6820 goto retry;
6821 }
6822
6823 return ret;
6824}
6825
6826static void mem_cgroup_move_charge(struct mm_struct *mm)
6827{
6828 struct vm_area_struct *vma;
6829
6830 lru_add_drain_all();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006831retry:
6832 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
6833 /*
 6834		 * Someone who is holding the mmap_sem might be waiting on the
 6835		 * waitq. So we cancel all extra charges, wake up all waiters,
6836 * and retry. Because we cancel precharges, we might not be able
6837 * to move enough charges, but moving charge is a best-effort
6838 * feature anyway, so it wouldn't be a big problem.
6839 */
6840 __mem_cgroup_clear_mc();
6841 cond_resched();
6842 goto retry;
6843 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006844 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6845 int ret;
6846 struct mm_walk mem_cgroup_move_charge_walk = {
6847 .pmd_entry = mem_cgroup_move_charge_pte_range,
6848 .mm = mm,
6849 .private = vma,
6850 };
6851 if (is_vm_hugetlb_page(vma))
6852 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08006853 ret = walk_page_range(vma->vm_start, vma->vm_end,
6854 &mem_cgroup_move_charge_walk);
6855 if (ret)
6856 /*
 6857			 * means we have consumed all precharges and failed to
 6858			 * do an additional charge. Just abandon here.
6859 */
6860 break;
6861 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006862 up_read(&mm->mmap_sem);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08006863}
6864
Tejun Heoeb954192013-08-08 20:11:23 -04006865static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08006866 struct cgroup_taskset *tset)
Balbir Singh67e465a2008-02-07 00:13:54 -08006867{
Tejun Heo2f7ee562011-12-12 18:12:21 -08006868 struct task_struct *p = cgroup_taskset_first(tset);
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07006869 struct mm_struct *mm = get_task_mm(p);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006870
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006871 if (mm) {
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07006872 if (mc.to)
6873 mem_cgroup_move_charge(mm);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08006874 mmput(mm);
6875 }
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07006876 if (mc.to)
6877 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08006878}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006879#else /* !CONFIG_MMU */
Tejun Heoeb954192013-08-08 20:11:23 -04006880static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08006881 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006882{
6883 return 0;
6884}
Tejun Heoeb954192013-08-08 20:11:23 -04006885static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08006886 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006887{
6888}
Tejun Heoeb954192013-08-08 20:11:23 -04006889static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08006890 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07006891{
6892}
6893#endif

/*
 * The cgroup core retains root cgroups across [un]mount cycles, making it
 * necessary to verify whether we're attached to the default hierarchy on
 * each mount attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_on_dfl(root_css->cgroup))
		mem_cgroup_from_css(root_css)->use_hierarchy = true;
}
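/*
 * Note (editorial, not part of the original source): ->bind() is invoked by
 * the cgroup core when the controller is bound to a hierarchy at mount time.
 * Because root cgroups survive unmount (see the comment above), the
 * default-hierarchy check must be repeated on every mount rather than being
 * done once when the root css is allocated.
 */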

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.legacy_cftypes = mem_cgroup_files,
	.early_init = 0,
};
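/*
 * Usage note (editorial, not part of the original source): these callbacks
 * are driven by the cgroup core.  For example, writing a PID to the memory
 * cgroup's "cgroup.procs" (or "tasks") file calls ->can_attach() and then
 * ->attach() above, which, with memory.move_charge_at_immigrate set, is what
 * triggers the charge-moving machinery earlier in this file.
 */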

#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
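/*
 * Usage note (editorial, not part of the original source): booting with
 * "swapaccount=0" on the kernel command line disables swap accounting and
 * "swapaccount=1" enables it; any other value leaves really_do_swap_account
 * at its build-time default.
 */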

static void __init memsw_file_init(void)
{
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
					  memsw_cgroup_files));
}

static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		memsw_file_init();
	}
}

#else
static void __init enable_swap_cgroup(void)
{
}
#endif

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	mem_cgroup_soft_limit_tree_init();
	memcg_stock_init();
	return 0;
}
subsys_initcall(mem_cgroup_init);