// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The
 * driver acts like a "balloon" that can be inflated to reclaim physical
 * pages by reserving them in the guest and invalidating them in the
 * monitor, freeing up the underlying machine pages so they can be
 * allocated to other guests. The balloon can also be deflated to allow
 * the guest to use more physical memory. Higher level policies can
 * control the sizes of balloons in VMs in order to manage physical
 * memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

/* Maximum number of refused pages we accumulate during the inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */
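
/*
 * For orientation (a descriptive sketch, not code used below): a balloon
 * "backdoor" call is issued as an I/O port read. As __vmballoon_cmd()
 * below shows, the magic number goes in %eax, the command in %ecx, the
 * port in %dx and the two command arguments in %ebx and %esi; the
 * hypervisor returns the status in %eax and the result in %ebx (in %ecx
 * for the START command).
 */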

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated with any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)
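
/*
 * For reference: with a 4KB base page, a 2MB huge page spans
 * 1 << VMW_BALLOON_2M_SHIFT == 512 base pages, which is exactly what
 * vmballoon_page_size() below reports for 2M pages.
 */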

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available.
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands.
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands.
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10

#define VMW_BALLOON_CMD_NUM			11

/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
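
/*
 * The commands in the mask above make the hypervisor report the current
 * balloon target as a side effect; __vmballoon_cmd() uses the mask to
 * refresh b->target on every such successful command:
 *
 *	if (status == VMW_BALLOON_SUCCESS &&
 *	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
 *		b->target = local_result;
 */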

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;
	unsigned int doorbell;

	/* allocation statistics */
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations. */
	unsigned long ops[VMW_BALLOON_CMD_NUM];
	unsigned long ops_fail[VMW_BALLOON_CMD_NUM];
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
		    bool is_2m_pages);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
		      bool is_2m_pages);
};

struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
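
/*
 * Layout sketch, assuming the x86 PAGE_SHIFT of 12: the entry is the
 * page's physical address with the sub-page bits reused, i.e.
 *
 *	bits  0-4:  status, written back by the hypervisor
 *	bits  5-11: reserved, must be zero
 *	bits 12-63: pfn, so entry >> PAGE_SHIFT recovers the frame number
 */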

struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;

	struct vmci_handle vmci_doorbell;
};

static struct vmballoon balloon;

static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	STATS_INC(b->stats.ops[cmd]);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		b->target = local_result;

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		STATS_INC(b->stats.ops_fail[cmd]);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}

static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}
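
/*
 * Typical usage, as in the callers below: code that only needs the status
 * uses the vmballoon_cmd() wrapper, e.g.
 *
 *	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
 *
 * while code that also needs the value the hypervisor hands back (such as
 * the START handshake, which returns the capability set) passes a result
 * pointer to __vmballoon_cmd() directly.
 */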

/*
 * Send the "start" command to the host, communicating the supported
 * version of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;
	bool success;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		success = true;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		success = true;
		break;
	default:
		success = false;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	return success;
}

/*
 * Communicate guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending the "start" command and is part of
 * the standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	if (status == VMW_BALLOON_SUCCESS)
		return true;

	return false;
}

static u16 vmballoon_page_size(bool is_2m_page)
{
	if (is_2m_page)
		return 1 << VMW_BALLOON_2M_SHIFT;

	return 1;
}

/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;
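
	/*
	 * Worked example (assuming 4KB base pages): totalram is measured in
	 * pages, so the 32-bit check above only fails on guests with more
	 * than 2^32 pages, i.e. more than 16TB of RAM.
	 */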

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	if (status == VMW_BALLOON_SUCCESS)
		return true;

	return false;
}

/*
 * Notify the host about an allocated page so that the host can use it
 * without fear that the guest will need it. The host may reject some
 * pages; we need to check the return value and maybe submit a different
 * page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				    unsigned int *hv_status, bool lock)
{
	unsigned long status, cmd;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -EINVAL;

	cmd = lock ? VMW_BALLOON_CMD_LOCK : VMW_BALLOON_CMD_UNLOCK;

	*hv_status = status = vmballoon_cmd(b, cmd, pfn, 0);

	if (status == VMW_BALLOON_SUCCESS)
		return 0;

	return -EIO;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
				       unsigned int num_pages, bool is_2m_pages,
				       bool lock)
{
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	unsigned long status, cmd;

	if (lock)
		cmd = is_2m_pages ? VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				    VMW_BALLOON_CMD_BATCHED_LOCK;
	else
		cmd = is_2m_pages ? VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				    VMW_BALLOON_CMD_BATCHED_UNLOCK;

	status = vmballoon_cmd(b, cmd, pfn, num_pages);

	if (status == VMW_BALLOON_SUCCESS)
		return 0;

	return 1;
}

static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	if (is_2m_page)
		return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

	return alloc_page(flags);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	if (is_2m_page)
		__free_pages(page, VMW_BALLOON_2M_SHIFT);
	else
		__free_page(page);
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike normal "deflate" we do not (shall not) notify the host
 * of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
	}

	/* Clearing the batch_page unconditionally has no adverse effect */
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
}

/*
 * Notify the host of a ballooned page. If the host rejects the page, put
 * it on the refuse list; those refused pages are then released at the end
 * of the inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
			       bool is_2m_pages)
{
	int locked;
	unsigned int hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
					  true);

	if (locked) {
		STATS_INC(b->stats.refused_alloc[false]);

		if (locked == -EIO &&
		    (hv_status == VMW_BALLOON_ERROR_RESET ||
		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return locked;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
				       unsigned int num_pages, bool is_2m_pages)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages, true);

	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			struct page *p = pfn_to_page(b->batch_page[i].pfn);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		struct page *p = pfn_to_page(b->batch_page[i].pfn);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = b->batch_page[i].status;

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
				 bool is_2m_pages)
{
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];
	unsigned int hv_status;

	/* is_2m_pages can never happen as 2m pages support implies batching */

	if (vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
				     false)) {
		list_add(&page->lru, &page_size->pages);
		return -EIO;
	}

	/* deallocate page */
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);

	/* update balloon size */
	b->size--;

	return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
					 unsigned int num_pages, bool is_2m_pages)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
						 false) == 0;

	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pfn_to_page(b->batch_page[i].pfn);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = b->batch_page[i].status;
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    bool is_2m_pages)
{
	struct page *page, *next;
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];

	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
		list_del(&page->lru);
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
	}

	page_size->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				       struct page *p)
{
	b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
	bool is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate the balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if the balloon target is not met yet,
	 * which is a desired behavior. However, the balloon driver can
	 * consume all available CPU cycles if too many pages are allocated
	 * in a second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slow down page allocations considerably.
	 */

	/*
	 * Start with the NOSLEEP allocation rate, which may be higher
	 * than the sleeping allocation rate.
	 */
	is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;

	pr_debug("%s - goal: %d", __func__, b->target - b->size);

	while (!b->reset_required &&
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
		struct page *page;

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc[is_2m_pages]);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = vmballoon_alloc_page(flags, is_2m_pages);
		if (!page) {
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
				b->ops->lock(b, num_pages, true);

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. We just log
				 * the event, but do not stop the inflation
				 * due to its negative impact on performance.
				 */
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Slowing down page allocations
			 * seems to be reasonable, but doing so might actually
			 * cause the hypervisor to throttle us down, resulting
			 * in degraded performance. We will count on the
			 * scheduler and standard memory management mechanisms
			 * for now.
			 */
			flags = VMW_PAGE_ALLOC_CANSLEEP;
			continue;
		}

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, is_2m_pages);

			num_pages = 0;
			if (error)
				break;
		}

		cond_resched();
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, is_2m_pages);

	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
}

/*
 * Decrease the size of the balloon allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	unsigned is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			if (b->reset_required ||
			    (b->target > 0 &&
			     b->size - num_pages
			     * vmballoon_page_size(is_2m_pages)
			     < b->target + vmballoon_page_size(true)))
				break;

			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);

			if (num_pages == b->batch_max_pages) {
				int error;

				error = b->ops->unlock(b, num_pages,
						       is_2m_pages);
				num_pages = 0;
				if (error)
					return;
			}

			cond_resched();
		}

		if (num_pages > 0)
			b->ops->unlock(b, num_pages, is_2m_pages);
	}
}

static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};

static bool vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return false;

	b->batch_page = page_address(page);
	return true;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	STATS_INC(b->stats.doorbell);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/*
 * Initialize vmci doorbell, to get notified as soon as balloon changes
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = PAGE_SIZE /
				     sizeof(struct vmballoon_batch_entry);
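		/*
		 * A quick sanity sketch (assuming 4KB pages): each batch
		 * entry is a packed 64-bit value, so batch_max_pages comes
		 * out to 4096 / 8 == 512 entries per communication page.
		 */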
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (!b->reset_required && vmballoon_send_get_target(b)) {
		unsigned long target = b->target;

		/* update target, adjust size */
		if (b->size < target)
			vmballoon_inflate(b);
		else if (target == 0 ||
			 b->size > target + vmballoon_page_size(true))
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;
	int i;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16lu (%lu failed)\n",
			   vmballoon_cmd_names[i], stats->ops[i],
			   stats->ops_fail[i]);
	}

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "doorbell:           %8u\n"
		   "prim2mAlloc:        %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "prim2mFree:         %8u\n"
		   "primFree:           %8u\n"
		   "err2mAlloc:         %8u\n"
		   "errAlloc:           %8u\n"
		   "err2mFree:          %8u\n"
		   "errFree:            %8u\n",
		   stats->timer,
		   stats->doorbell,
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
		   stats->refused_free[true], stats->refused_free[false]);

	return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);