// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We do not allow the
 * allocator to sleep (__GFP_RECLAIM) for huge page allocations. Use
 * __GFP_NOWARN to suppress page allocation failure warnings. Disallow access
 * to emergency low-memory pools.
 */
#define VMW_HUGE_PAGE_ALLOC_FLAGS       (__GFP_HIGHMEM|__GFP_NOWARN| \
                                         __GFP_NOMEMALLOC)

/*
 * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We allow lightweight
 * reclamation (__GFP_NORETRY). Use __GFP_NOWARN to suppress page allocation
 * failure warnings. Disallow access to emergency low-memory pools.
 */
#define VMW_PAGE_ALLOC_FLAGS            (__GFP_HIGHMEM|__GFP_NOWARN| \
                                         __GFP_NOMEMALLOC|__GFP_NORETRY)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED         16

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT             0x5670
#define VMW_BALLOON_HV_MAGIC            0x456c6d6f
#define VMW_BALLOON_GUEST_ID            1 /* Linux */

enum vmwballoon_capabilities {
        /*
         * Bit 0 is reserved and not associated with any capability.
         */
        VMW_BALLOON_BASIC_CMDS                  = (1 << 1),
        VMW_BALLOON_BATCHED_CMDS                = (1 << 2),
        VMW_BALLOON_BATCHED_2M_CMDS             = (1 << 3),
        VMW_BALLOON_SIGNALLED_WAKEUP_CMD        = (1 << 4),
};

#define VMW_BALLOON_CAPABILITIES        (VMW_BALLOON_BASIC_CMDS \
                                        | VMW_BALLOON_BATCHED_CMDS \
                                        | VMW_BALLOON_BATCHED_2M_CMDS \
                                        | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_SHIFT            (9)
#define VMW_BALLOON_NUM_PAGE_SIZES      (2)

/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available.
 *
 * VMW_BALLOON_BASIC_CMDS:
 *      LOCK and UNLOCK commands.
 * VMW_BALLOON_BATCHED_CMDS:
 *      BATCHED_LOCK and BATCHED_UNLOCK commands.
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *      BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands.
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *      VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START                   0
#define VMW_BALLOON_CMD_GET_TARGET              1
#define VMW_BALLOON_CMD_LOCK                    2
#define VMW_BALLOON_CMD_UNLOCK                  3
#define VMW_BALLOON_CMD_GUEST_ID                4
#define VMW_BALLOON_CMD_BATCHED_LOCK            6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK          7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK         8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK       9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET       10

#define VMW_BALLOON_CMD_NUM                     11

/* error codes */
#define VMW_BALLOON_SUCCESS                     0
#define VMW_BALLOON_FAILURE                     -1
#define VMW_BALLOON_ERROR_CMD_INVALID           1
#define VMW_BALLOON_ERROR_PPN_INVALID           2
#define VMW_BALLOON_ERROR_PPN_LOCKED            3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED          4
#define VMW_BALLOON_ERROR_PPN_PINNED            5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED         6
#define VMW_BALLOON_ERROR_RESET                 7
#define VMW_BALLOON_ERROR_BUSY                  8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES   (0x03000000)

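/*
 * Commands in this mask return the current balloon target in their result
 * value; __vmballoon_cmd() uses it to refresh b->target on success.
 */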
#define VMW_BALLOON_CMD_WITH_TARGET_MASK                        \
        ((1UL << VMW_BALLOON_CMD_GET_TARGET)            |       \
         (1UL << VMW_BALLOON_CMD_LOCK)                  |       \
         (1UL << VMW_BALLOON_CMD_UNLOCK)                |       \
         (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)          |       \
         (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)        |       \
         (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)       |       \
         (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
        [VMW_BALLOON_CMD_START]                 = "start",
        [VMW_BALLOON_CMD_GET_TARGET]            = "target",
        [VMW_BALLOON_CMD_LOCK]                  = "lock",
        [VMW_BALLOON_CMD_UNLOCK]                = "unlock",
        [VMW_BALLOON_CMD_GUEST_ID]              = "guestType",
        [VMW_BALLOON_CMD_BATCHED_LOCK]          = "batchLock",
        [VMW_BALLOON_CMD_BATCHED_UNLOCK]        = "batchUnlock",
        [VMW_BALLOON_CMD_BATCHED_2M_LOCK]       = "2m-lock",
        [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]     = "2m-unlock",
        [VMW_BALLOON_CMD_VMCI_DOORBELL_SET]     = "doorbellSet"
};

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
        unsigned int timer;
        unsigned int doorbell;

        /* allocation statistics */
        unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
        unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

        /* Monitor operations. */
        unsigned long ops[VMW_BALLOON_CMD_NUM];
        unsigned long ops_fail[VMW_BALLOON_CMD_NUM];
};

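/*
 * STATS_INC() only updates counters when CONFIG_DEBUG_FS is enabled;
 * otherwise it expands to nothing, so statistics tracking adds no overhead
 * to production builds.
 */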
#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

struct vmballoon;

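/*
 * Operations that differ between the batched and the legacy (one page per
 * command) protocols. The appropriate set is selected in vmballoon_reset()
 * based on the capabilities reported by the hypervisor.
 */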
struct vmballoon_ops {
        void (*add_page)(struct vmballoon *b, int idx, struct page *p);
        int (*lock)(struct vmballoon *b, unsigned int num_pages,
                    bool is_2m_pages);
        int (*unlock)(struct vmballoon *b, unsigned int num_pages,
                      bool is_2m_pages);
};

struct vmballoon_page_size {
        /* list of reserved physical pages */
        struct list_head pages;

        /* transient list of non-balloonable pages */
        struct list_head refused_pages;
        unsigned int n_refused_pages;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
        u64 status : 5;
        u64 reserved : PAGE_SHIFT - 5;
        u64 pfn : 52;
} __packed;
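
/*
 * With 4 KiB pages (PAGE_SHIFT == 12) a batch entry is exactly 64 bits:
 * 5 status bits, 7 reserved bits and a 52-bit PFN.
 */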

struct vmballoon {
        struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

        /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
        unsigned supported_page_sizes;

        /* balloon size in pages */
        unsigned int size;
        unsigned int target;

        /* reset flag */
        bool reset_required;

        unsigned long capabilities;

        /**
         * @batch_page: pointer to communication batch page.
         *
         * When batching is used, batch_page points to a page that holds up to
         * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
         */
        struct vmballoon_batch_entry *batch_page;

        unsigned int batch_max_pages;
        struct page *page;

        const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
        /* statistics */
        struct vmballoon_stats stats;

        /* debugfs file exporting statistics */
        struct dentry *dbg_entry;
#endif

        struct sysinfo sysinfo;

        struct delayed_work dwork;

        struct vmci_handle vmci_doorbell;
};

static struct vmballoon balloon;

static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
                unsigned long arg2, unsigned long *result)
{
        unsigned long status, dummy1, dummy2, dummy3, local_result;

        STATS_INC(b->stats.ops[cmd]);

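        /*
         * The hypervisor backdoor is an "inl" on a magic I/O port: %eax
         * carries the magic number, %ecx the command, %edx the port, and
         * %ebx/%esi the two arguments. On return %eax holds the status and
         * %ebx the result; the START command returns its result in %ecx.
         */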
        asm volatile ("inl %%dx" :
                "=a"(status),
                "=c"(dummy1),
                "=d"(dummy2),
                "=b"(local_result),
                "=S"(dummy3) :
                "0"(VMW_BALLOON_HV_MAGIC),
                "1"(cmd),
                "2"(VMW_BALLOON_HV_PORT),
                "3"(arg1),
                "4"(arg2) :
                "memory");

        /* update the result if needed */
        if (result)
                *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
                                                           local_result;

        /* update target when applicable */
        if (status == VMW_BALLOON_SUCCESS &&
            ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
                b->target = local_result;

        if (status != VMW_BALLOON_SUCCESS &&
            status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
                STATS_INC(b->stats.ops_fail[cmd]);
                pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
                         __func__, vmballoon_cmd_names[cmd], arg1, arg2,
                         status);
        }

        /* mark reset required accordingly */
        if (status == VMW_BALLOON_ERROR_RESET)
                b->reset_required = true;

        return status;
}

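/*
 * Convenience wrapper around __vmballoon_cmd() for callers that only care
 * about the status, not the result value.
 */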
static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
              unsigned long arg2)
{
        unsigned long dummy;

        return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send the "start" command to the host, communicating the supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
        unsigned long status, capabilities;
        bool success;

        status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
                                 &capabilities);

        switch (status) {
        case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
                b->capabilities = capabilities;
                success = true;
                break;
        case VMW_BALLOON_SUCCESS:
                b->capabilities = VMW_BALLOON_BASIC_CMDS;
                success = true;
                break;
        default:
                success = false;
        }

        /*
         * 2MB pages are only supported with batching. If batching is for some
         * reason disabled, do not use 2MB pages, since otherwise the legacy
         * mechanism is used with 2MB pages, causing a failure.
         */
        if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
            (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
                b->supported_page_sizes = 2;
        else
                b->supported_page_sizes = 1;

        return success;
}

/*
 * Communicate the guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command is
 * normally issued after sending the "start" command and is part of the
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
        unsigned long status;

        status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
                               VMW_BALLOON_GUEST_ID, 0);

        if (status == VMW_BALLOON_SUCCESS)
                return true;

        return false;
}

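/*
 * Return the number of 4k pages covered by a single balloon page of the given
 * size: 512 (1 << VMW_BALLOON_2M_SHIFT) for a 2m page, otherwise 1.
 */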
static u16 vmballoon_page_size(bool is_2m_page)
{
        if (is_2m_page)
                return 1 << VMW_BALLOON_2M_SHIFT;

        return 1;
}

/*
 * Retrieve the desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b)
{
        unsigned long status;
        unsigned long limit;
        u32 limit32;

        /*
         * si_meminfo() is cheap. Moreover, we want to provide dynamic
         * max balloon size later. So let us call si_meminfo() every
         * iteration.
         */
        si_meminfo(&b->sysinfo);
        limit = b->sysinfo.totalram;

        /* Ensure limit fits in 32 bits */
        limit32 = (u32)limit;
        if (limit != limit32)
                return false;

        status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

        if (status == VMW_BALLOON_SUCCESS)
                return true;

        return false;
}

/*
 * Notify the host about an allocated page so that the host can use it without
 * fear that the guest will need it. The host may reject some pages; we need to
 * check the return value and, if needed, submit a different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
                                    unsigned int *hv_status, bool lock)
{
        unsigned long status, cmd;
        u32 pfn32;

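        /* The single-page protocol can only carry a 32-bit PFN. */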
        pfn32 = (u32)pfn;
        if (pfn32 != pfn)
                return -EINVAL;

        cmd = lock ? VMW_BALLOON_CMD_LOCK : VMW_BALLOON_CMD_UNLOCK;

        *hv_status = status = vmballoon_cmd(b, cmd, pfn, 0);

        if (status == VMW_BALLOON_SUCCESS)
                return 0;

        return -EIO;
}

static int vmballoon_send_batched_lock(struct vmballoon *b,
                                       unsigned int num_pages, bool is_2m_pages,
                                       bool lock)
{
        unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
        unsigned long status, cmd;

        if (lock)
                cmd = is_2m_pages ? VMW_BALLOON_CMD_BATCHED_2M_LOCK :
                                    VMW_BALLOON_CMD_BATCHED_LOCK;
        else
                cmd = is_2m_pages ? VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
                                    VMW_BALLOON_CMD_BATCHED_UNLOCK;

        status = vmballoon_cmd(b, cmd, pfn, num_pages);

        if (status == VMW_BALLOON_SUCCESS)
                return 0;

        return 1;
}

static struct page *vmballoon_alloc_page(bool is_2m_page)
{
        if (is_2m_page)
                return alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
                                   VMW_BALLOON_2M_SHIFT);

        return alloc_page(VMW_PAGE_ALLOC_FLAGS);
}

static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
        if (is_2m_page)
                __free_pages(page, VMW_BALLOON_2M_SHIFT);
        else
                __free_page(page);
}

/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when the host decides to "reset" the balloon for one reason or
 * another. Unlike a normal "deflate" we do not (shall not) notify the host
 * of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
        struct page *page, *next;
        unsigned is_2m_pages;

        for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
                        is_2m_pages++) {
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];
                u16 size_per_page = vmballoon_page_size(is_2m_pages);

                list_for_each_entry_safe(page, next, &page_size->pages, lru) {
                        list_del(&page->lru);
                        vmballoon_free_page(page, is_2m_pages);
                        STATS_INC(b->stats.free[is_2m_pages]);
                        b->size -= size_per_page;
                        cond_resched();
                }
        }

        /* Clearing the batch_page unconditionally has no adverse effect */
        free_page((unsigned long)b->batch_page);
        b->batch_page = NULL;
}

/*
 * Notify the host of a ballooned page. If the host rejects the page, put it
 * on the refused list; refused pages are then released at the end of the
 * inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
                               bool is_2m_pages)
{
        int locked, hv_status;
        struct page *page = b->page;
        struct vmballoon_page_size *page_size = &b->page_sizes[false];

        /* is_2m_pages can never be true here: 2M-page support implies batching */

        locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                          true);

        if (locked) {
                STATS_INC(b->stats.refused_alloc[false]);

                if (locked == -EIO &&
                    (hv_status == VMW_BALLOON_ERROR_RESET ||
                     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
                        vmballoon_free_page(page, false);
                        return -EIO;
                }

                /*
                 * Place page on the list of non-balloonable pages
                 * and retry allocation, unless we already accumulated
                 * too many of them, in which case take a breather.
                 */
                if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
                        page_size->n_refused_pages++;
                        list_add(&page->lru, &page_size->refused_pages);
                } else {
                        vmballoon_free_page(page, false);
                }
                return locked;
        }

        /* track allocated page */
        list_add(&page->lru, &page_size->pages);

        /* update balloon size */
        b->size++;

        return 0;
}

static int vmballoon_lock_batched_page(struct vmballoon *b,
                                       unsigned int num_pages, bool is_2m_pages)
{
        int locked, i;
        u16 size_per_page = vmballoon_page_size(is_2m_pages);

        locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages, true);

        if (locked > 0) {
                for (i = 0; i < num_pages; i++) {
                        struct page *p = pfn_to_page(b->batch_page[i].pfn);

                        vmballoon_free_page(p, is_2m_pages);
                }

                return -EIO;
        }

        for (i = 0; i < num_pages; i++) {
                struct page *p = pfn_to_page(b->batch_page[i].pfn);
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];

                locked = b->batch_page[i].status;

                switch (locked) {
                case VMW_BALLOON_SUCCESS:
                        list_add(&p->lru, &page_size->pages);
                        b->size += size_per_page;
                        break;
                case VMW_BALLOON_ERROR_PPN_PINNED:
                case VMW_BALLOON_ERROR_PPN_INVALID:
                        if (page_size->n_refused_pages
                                        < VMW_BALLOON_MAX_REFUSED) {
                                list_add(&p->lru, &page_size->refused_pages);
                                page_size->n_refused_pages++;
                                break;
                        }
                        /* Fallthrough */
                case VMW_BALLOON_ERROR_RESET:
                case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
                        vmballoon_free_page(p, is_2m_pages);
                        break;
                default:
                        /* This should never happen */
                        WARN_ON_ONCE(true);
                }
        }

        return 0;
}

/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
                                 bool is_2m_pages)
{
        struct page *page = b->page;
        struct vmballoon_page_size *page_size = &b->page_sizes[false];
        unsigned int hv_status;

        /* is_2m_pages can never be true here: 2M-page support implies batching */

        if (!vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                      false)) {
                list_add(&page->lru, &page_size->pages);
                return -EIO;
        }

        /* deallocate page */
        vmballoon_free_page(page, false);
        STATS_INC(b->stats.free[false]);

        /* update balloon size */
        b->size--;

        return 0;
}

static int vmballoon_unlock_batched_page(struct vmballoon *b,
                                         unsigned int num_pages,
                                         bool is_2m_pages)
{
        int locked, i, ret = 0;
        bool hv_success;
        u16 size_per_page = vmballoon_page_size(is_2m_pages);

        hv_success = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
                                                 false);

        if (!hv_success)
                ret = -EIO;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pfn_to_page(b->batch_page[i].pfn);
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];

                locked = b->batch_page[i].status;
                if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
                        /*
                         * That page wasn't successfully unlocked by the
                         * hypervisor, re-add it to the list of pages owned by
                         * the balloon driver.
                         */
                        list_add(&p->lru, &page_size->pages);
                } else {
                        /* deallocate page */
                        vmballoon_free_page(p, is_2m_pages);
                        STATS_INC(b->stats.free[is_2m_pages]);

                        /* update balloon size */
                        b->size -= size_per_page;
                }
        }

        return ret;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
                                            bool is_2m_pages)
{
        struct page *page, *next;
        struct vmballoon_page_size *page_size =
                        &b->page_sizes[is_2m_pages];

        list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
                list_del(&page->lru);
                vmballoon_free_page(page, is_2m_pages);
                STATS_INC(b->stats.refused_free[is_2m_pages]);
        }

        page_size->n_refused_pages = 0;
}

static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
        b->page = p;
}

static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
                                       struct page *p)
{
        b->batch_page[idx] = (struct vmballoon_batch_entry)
                                        { .pfn = page_to_pfn(p) };
}

/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
        unsigned int num_pages = 0;
        int error = 0;
        bool is_2m_pages;

        pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

        /*
         * First try NOSLEEP page allocations to inflate balloon.
         *
         * If we do not throttle nosleep allocations, we can drain all
         * free pages in the guest quickly (if the balloon target is high).
         * As a side-effect, draining free pages helps to inform (force)
         * the guest to start swapping if balloon target is not met yet,
         * which is a desired behavior. However, the balloon driver can
         * consume all available CPU cycles if too many pages are allocated
         * in a second. Therefore, we throttle nosleep allocations even when
         * the guest is not under memory pressure. OTOH, if we have already
         * predicted that the guest is under memory pressure, then we
         * slow down page allocations considerably.
         */

        /*
         * Start with the no-sleep allocation rate, which may be higher
         * than the sleeping allocation rate.
         */
        is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;

        pr_debug("%s - goal: %d", __func__, b->target - b->size);

        while (!b->reset_required &&
               b->size + num_pages * vmballoon_page_size(is_2m_pages)
               < b->target) {
                struct page *page;

                STATS_INC(b->stats.alloc[is_2m_pages]);
                page = vmballoon_alloc_page(is_2m_pages);
                if (!page) {
                        STATS_INC(b->stats.alloc_fail[is_2m_pages]);
                        if (is_2m_pages) {
                                b->ops->lock(b, num_pages, true);

                                /*
                                 * ignore errors from locking as we now switch
                                 * to 4k pages and we might get different
                                 * errors.
                                 */

                                num_pages = 0;
                                is_2m_pages = false;
                                continue;
                        }
                        break;
                }

                b->ops->add_page(b, num_pages++, page);
                if (num_pages == b->batch_max_pages) {
                        error = b->ops->lock(b, num_pages, is_2m_pages);

                        num_pages = 0;
                        if (error)
                                break;
                }

                cond_resched();
        }

        if (num_pages > 0)
                b->ops->lock(b, num_pages, is_2m_pages);

        vmballoon_release_refused_pages(b, true);
        vmballoon_release_refused_pages(b, false);
}

/*
 * Decrease the size of the balloon, allowing the guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
        unsigned is_2m_pages;

        pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

        /* free pages to reach target */
        for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
                        is_2m_pages++) {
                struct page *page, *next;
                unsigned int num_pages = 0;
                struct vmballoon_page_size *page_size =
                                &b->page_sizes[is_2m_pages];

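                /*
                 * Free pages until the balloon gets within one 2 MB page of
                 * the target, or until a reset is required.
                 */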
                list_for_each_entry_safe(page, next, &page_size->pages, lru) {
                        if (b->reset_required ||
                            (b->target > 0 &&
                             b->size - num_pages
                             * vmballoon_page_size(is_2m_pages)
                             < b->target + vmballoon_page_size(true)))
                                break;

                        list_del(&page->lru);
                        b->ops->add_page(b, num_pages++, page);

                        if (num_pages == b->batch_max_pages) {
                                int error;

                                error = b->ops->unlock(b, num_pages,
                                                       is_2m_pages);
                                num_pages = 0;
                                if (error)
                                        return;
                        }

                        cond_resched();
                }

                if (num_pages > 0)
                        b->ops->unlock(b, num_pages, is_2m_pages);
        }
}

static const struct vmballoon_ops vmballoon_basic_ops = {
        .add_page = vmballoon_add_page,
        .lock = vmballoon_lock_page,
        .unlock = vmballoon_unlock_page
};

static const struct vmballoon_ops vmballoon_batched_ops = {
        .add_page = vmballoon_add_batched_page,
        .lock = vmballoon_lock_batched_page,
        .unlock = vmballoon_unlock_batched_page
};

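/*
 * Allocate the zeroed page that is used to communicate batched lock/unlock
 * requests to the hypervisor.
 */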
static bool vmballoon_init_batching(struct vmballoon *b)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return false;

        b->batch_page = page_address(page);
        return true;
}

/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
        struct vmballoon *b = client_data;

        STATS_INC(b->stats.doorbell);

        mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up the VMCI doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
        vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
                      VMCI_INVALID_ID, VMCI_INVALID_ID);

        if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
                vmci_doorbell_destroy(b->vmci_doorbell);
                b->vmci_doorbell = VMCI_INVALID_HANDLE;
        }
}

/*
 * Initialize the VMCI doorbell, to get notified as soon as the balloon target
 * changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
        unsigned long error;

        if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
                return 0;

        error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
                                     VMCI_PRIVILEGE_FLAG_RESTRICTED,
                                     vmballoon_doorbell, b);

        if (error != VMCI_SUCCESS)
                goto fail;

        error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
                                b->vmci_doorbell.context,
                                b->vmci_doorbell.resource, NULL);

        if (error != VMW_BALLOON_SUCCESS)
                goto fail;

        return 0;
fail:
        vmballoon_vmci_cleanup(b);
        return -EIO;
}

/*
 * Perform the standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting the protocol. This operation normally
 * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
        int error;

        vmballoon_vmci_cleanup(b);

        /* free all pages, skipping monitor unlock */
        vmballoon_pop(b);

        if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
                return;

        if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
                b->ops = &vmballoon_batched_ops;
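                /*
                 * With 4 KiB pages and 8-byte batch entries, this yields
                 * 512 entries per batch page.
                 */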
                b->batch_max_pages = PAGE_SIZE / sizeof(struct
                                                vmballoon_batch_entry);
                if (!vmballoon_init_batching(b)) {
                        /*
                         * We failed to initialize batching, inform the monitor
                         * about it by sending a null capability.
                         *
                         * The guest will retry in one second.
                         */
                        vmballoon_send_start(b, 0);
                        return;
                }
        } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
                b->ops = &vmballoon_basic_ops;
                b->batch_max_pages = 1;
        }

        b->reset_required = false;

        error = vmballoon_vmci_init(b);
        if (error)
                pr_err("failed to initialize vmci doorbell\n");

        if (!vmballoon_send_guest_id(b))
                pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset the protocol, if needed, get the new size and
 * adjust the balloon as needed. Repeat every second.
 */
static void vmballoon_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);

        STATS_INC(b->stats.timer);

        if (b->reset_required)
                vmballoon_reset(b);

        if (!b->reset_required && vmballoon_send_get_target(b)) {
                unsigned long target = b->target;

                /* update target, adjust size */
                if (b->size < target)
                        vmballoon_inflate(b);
                else if (target == 0 ||
                         b->size > target + vmballoon_page_size(true))
                        vmballoon_deflate(b);
        }

        /*
         * We are using a freezable workqueue so that balloon operations are
         * stopped while the system transitions to/from sleep/hibernation.
         */
        queue_delayed_work(system_freezable_wq,
                           dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
        struct vmballoon *b = f->private;
        struct vmballoon_stats *stats = &b->stats;
        int i;

        /* format capabilities info */
        seq_printf(f,
                   "balloon capabilities: %#4x\n"
                   "used capabilities: %#4lx\n"
                   "is resetting: %c\n",
                   VMW_BALLOON_CAPABILITIES, b->capabilities,
                   b->reset_required ? 'y' : 'n');

        /* format size info */
        seq_printf(f,
                   "target: %8d pages\n"
                   "current: %8d pages\n",
                   b->target, b->size);

        for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
                if (vmballoon_cmd_names[i] == NULL)
                        continue;

                seq_printf(f, "%-22s: %16lu (%lu failed)\n",
                           vmballoon_cmd_names[i], stats->ops[i],
                           stats->ops_fail[i]);
        }

        seq_printf(f,
                   "\n"
                   "timer: %8u\n"
                   "doorbell: %8u\n"
                   "prim2mAlloc: %8u (%4u failed)\n"
                   "prim4kAlloc: %8u (%4u failed)\n"
                   "prim2mFree: %8u\n"
                   "primFree: %8u\n"
                   "err2mAlloc: %8u\n"
                   "errAlloc: %8u\n"
                   "err2mFree: %8u\n"
                   "errFree: %8u\n",
                   stats->timer,
                   stats->doorbell,
                   stats->alloc[true], stats->alloc_fail[true],
                   stats->alloc[false], stats->alloc_fail[false],
                   stats->free[true],
                   stats->free[false],
                   stats->refused_alloc[true], stats->refused_alloc[false],
                   stats->refused_free[true], stats->refused_free[false]);

        return 0;
}

static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, vmballoon_debug_show, inode->i_private);
}

static const struct file_operations vmballoon_debug_fops = {
        .owner          = THIS_MODULE,
        .open           = vmballoon_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
        int error;

        b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
                                           &vmballoon_debug_fops);
        if (IS_ERR(b->dbg_entry)) {
                error = PTR_ERR(b->dbg_entry);
                pr_err("failed to create debugfs entry, error: %d\n", error);
                return error;
        }

        return 0;
}

static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
        debugfs_remove(b->dbg_entry);
}

#else

static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
        return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif  /* CONFIG_DEBUG_FS */

static int __init vmballoon_init(void)
{
        int error;
        unsigned is_2m_pages;

        /*
         * Check if we are running on VMware's hypervisor and bail out
         * if we are not.
         */
        if (x86_hyper_type != X86_HYPER_VMWARE)
                return -ENODEV;

        for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
                        is_2m_pages++) {
                INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
                INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
        }

        INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

        error = vmballoon_debugfs_init(&balloon);
        if (error)
                return error;

        balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
        balloon.batch_page = NULL;
        balloon.page = NULL;
        balloon.reset_required = true;

        queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

        return 0;
}

/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);

static void __exit vmballoon_exit(void)
{
        vmballoon_vmci_cleanup(&balloon);
        cancel_delayed_work_sync(&balloon.dwork);

        vmballoon_debugfs_exit(&balloon);

        /*
         * Deallocate all reserved memory, and reset connection with monitor.
         * Reset connection before deallocating memory to avoid potential for
         * additional spurious resets from guest touching deallocated pages.
         */
        vmballoon_send_start(&balloon, 0);
        vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);