// SPDX-License-Identifier: GPL-2.0
/*
 * Manage the cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate swap slots from the global pool and put
 * them into local per-CPU caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return slots
 * to the local cache without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * An allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in swap_map, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we can possibly sleep when searching
 * for slots with scan_swap_map.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <trace/hooks/mm.h>

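/*
 * For reference, the per-CPU cache below is declared in
 * <linux/swap_slots.h>.  At the time of writing it looks roughly like
 * this (check the header for the authoritative definition; Android
 * kernels may append vendor fields):
 *
 *	struct swap_slots_cache {
 *		bool		lock_initialized;
 *		struct mutex	alloc_lock;	// protects slots, nr, cur
 *		swp_entry_t	*slots;		// slots for allocation
 *		int		nr;		// slots left in the cache
 *		int		cur;		// next slot to hand out
 *		spinlock_t	free_lock;	// protects slots_ret, n_ret
 *		swp_entry_t	*slots_ret;	// slots returned by free
 *		int		n_ret;		// returned slots batched so far
 *	};
 */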
static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool	swap_slot_cache_active;
bool	swap_slot_cache_enabled;
static bool	swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
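/*
 * Flags passed to __drain_swap_slots_cache() and drain_slots_cache_cpu()
 * to select which half of the cache to drain: SLOTS_CACHE flushes the
 * allocation side (cache->slots, under alloc_lock), SLOTS_CACHE_RET the
 * return side (cache->slots_ret, under free_lock).  Callers usually OR
 * them together to drain both.
 */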
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	trace_android_vh_swap_slot_cache_active(false);
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	trace_android_vh_swap_slot_cache_active(true);
	mutex_unlock(&swap_slots_cache_mutex);
}

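/*
 * The two functions below bracket swap device setup and teardown.  A
 * sketch of the expected caller pattern (the actual callers live in
 * mm/swapfile.c, around swapon/swapoff):
 *
 *	disable_swap_slots_cache_lock();
 *	... add or remove a swap device ...
 *	reenable_swap_slots_cache_unlock();
 */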
/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

bool is_swap_slot_cache_enabled(void)
{
	return swap_slot_cache_enabled;
}
EXPORT_SYMBOL_GPL(is_swap_slot_cache_enabled);

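/*
 * check_cache_active() applies hysteresis so the cache does not flap:
 * it is switched on only when free swap slots are plentiful and off
 * only when they run low.  A worked example, assuming the stock
 * definitions in <linux/swap_slots.h> and <linux/swap.h>
 * (THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE of 5 * SWAP_SLOTS_CACHE_SIZE,
 * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE of 2 * SWAP_SLOTS_CACHE_SIZE,
 * SWAP_SLOTS_CACHE_SIZE == SWAP_BATCH == 64): with 8 online CPUs the
 * cache reactivates above 8 * 5 * 64 = 2560 free slots and deactivates
 * below 8 * 2 * 64 = 1024.
 */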
bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}
EXPORT_SYMBOL_GPL(check_cache_active);

static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;
	bool skip = false;
	int ret = 0;

	/*
	 * Do the allocation outside swap_slots_cache_mutex,
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	trace_android_rvh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu),
						&ret, &skip);
	trace_android_vh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu),
					       &ret, &skip);
	if (skip)
		return ret;
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache.  The memory barrier below
	 * ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

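/*
 * Flush, and optionally free, one CPU's cache.  With free_slots == false
 * this just returns any cached entries to the global pool (used when
 * draining); with free_slots == true it also frees the slot arrays
 * themselves (used on CPU hotplug teardown via free_slot_cache()).
 */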
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;
	bool skip = false;

	cache = &per_cpu(swp_slots, cpu);
	trace_android_rvh_drain_slots_cache_cpu(cache, type,
						free_slots, &skip);
	trace_android_vh_drain_slots_cache_cpu(cache, type,
					       free_slots, &skip);
	if (skip)
		return;
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 * 1) swapoff, when we have to make sure no
	 *    left over slots are in cache when we remove
	 *    a swap device;
	 * 2) disabling of the swap slot cache, when we run low
	 *    on swap slots when allocating memory and need
	 *    to return swap slots to the global pool.
	 *
	 * We cannot acquire the cpu hot plug lock here, as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop below over online cpus could miss a cpu that
	 * is being brought online but is not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online.  Hence, we will not
	 * fill any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

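/*
 * Register alloc_swap_slot_cache()/free_slot_cache() as CPU hotplug
 * callbacks.  With CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() also invokes
 * the startup callback on every CPU that is already online, so the
 * per-CPU caches exist by the time this returns successfully.
 */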
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

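/*
 * A note on the refill below: get_swap_pages() (mm/swapfile.c) tries to
 * grab up to SWAP_SLOTS_CACHE_SIZE single-page entries in one batch and
 * returns how many it actually stored in cache->slots, so a partial
 * refill still lets the caller make progress.
 */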
/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

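/*
 * Return one swap entry to the cache.  Entries are batched in
 * cache->slots_ret and handed back to the global pool via
 * swapcache_free_entries() only once SWAP_SLOTS_CACHE_SIZE of them have
 * accumulated, so locking in the global pool is amortized over the
 * batch rather than paid per freed entry.
 */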
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;
	bool skip = false;

	cache = raw_cpu_ptr(&swp_slots);
	trace_android_rvh_free_swap_slot(entry, cache, &skip);
	trace_android_vh_free_swap_slot(entry, cache, &skip);
	if (skip)
		return 0;
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to the global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in the global pool.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

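/*
 * Allocate a swap entry for @page.  The order of attempts below:
 * vendor hooks may supply an entry directly; THP pages go straight to
 * the global pool (a huge page needs HPAGE_PMD_NR contiguous slots,
 * which the per-CPU cache does not hold); otherwise the per-CPU cache
 * is used, refilled on demand, with the global pool as the fallback.
 * Finally the entry is charged to the memcg, and freed again if the
 * charge fails.
 */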
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;
	bool found = false;

	entry.val = 0;

	trace_android_rvh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found);
	trace_android_vh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found);
	if (found)
		goto out;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}