// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
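
/*
 * Illustrative sketch (not part of the kernel build): the application-side
 * half of the ordering rules above, assuming liburing-style raw access where
 * sq/cq hold pointers into the mmap'ed rings. Helper and variable names here
 * are hypothetical; real applications should simply use liburing.
 *
 *	// submit one SQE: entry stores must be visible before the tail store
 *	sqes[tail & *sq.kring_mask] = sqe;
 *	smp_store_release(sq.ktail, tail + 1);
 *
 *	// with IORING_SETUP_SQPOLL: full barrier before checking the flags
 *	smp_mb();
 *	if (READ_ONCE(*sq.kflags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *
 *	// reap CQEs: acquire the tail, then release the new head
 *	while (head != smp_load_acquire(cq.ktail)) {
 *		cqe = &cqes[head & *cq.kring_mask];
 *		// ... consume cqe ...
 *		head++;
 *	}
 *	smp_store_release(cq.khead, head);
 */
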
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)
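
/*
 * Illustrative note (an assumption based on the constants above): a fixed
 * file index splits into a table index and a slot within that table,
 * roughly:
 *
 *	table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[i & IORING_FILE_TABLE_MASK];
 *
 * so up to 64 tables of 512 slots each cover IORING_MAX_FIXED_FILES files.
 */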

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct fixed_file_table {
	struct file	**files;
};

struct fixed_file_ref_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	file_list;
	struct fixed_file_data	*file_data;
	struct llist_node	llist;
	bool			done;
};

struct fixed_file_data {
	struct fixed_file_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_file_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	struct list_head		ref_list;
	spinlock_t			lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_sq_data {
	refcount_t		refs;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;
	struct list_head	ctx_new_list;
	struct mutex		ctx_lock;

	struct task_struct	*thread;
	struct wait_queue_head	wait;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		limit_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;

	/*
	 * For SQPOLL usage - we hold a reference to the parent task, so we
	 * have access to the ->files
	 */
	struct task_struct	*sqo_task;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state	*sqo_blkcg_css;
#endif

	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct wait_queue_entry	sqo_wait_entry;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

#ifdef CONFIG_AUDIT
	kuid_t			loginuid;
	unsigned int		sessionid;
#endif

	struct completion	ref_comp;
	struct completion	sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		file_put_work;
	struct llist_head		file_put_llist;

	struct work_struct		exit_work;
	struct io_restriction		restrictions;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	struct file			*put_file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	bool				ignore_nonblock;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_files_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	int				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_WORK_INITIALIZED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD		= BIT(REQ_F_LINK_HEAD_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_files_update	files_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct list_head		link_list;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;

	struct percpu_ref		*fixed_file_refs;
	struct callback_head		task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

#define IO_IOPOLL_BATCH			8

struct io_comp_state {
	unsigned int		nr;
	struct list_head	list;
	struct io_ring_ctx	*ctx;
};

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		ios_left;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* don't fail if file grab fails */
	unsigned		needs_file_no_error : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
	unsigned		work_flags;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
						IO_WQ_WORK_MM,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS,
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS,
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_OPENAT] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS | IO_WQ_WORK_MM,
	},
	[IORING_OP_CLOSE] = {
		.needs_file		= 1,
		.needs_file_no_error	= 1,
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_FILES_UPDATE] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
	},
	[IORING_OP_STATX] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_MADVISE] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_OPENAT2] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
						IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.work_flags		= IO_WQ_WORK_FILES,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_UNLINKAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
};

enum io_mem_account {
	ACCT_LOCKED,
	ACCT_PINNED,
};

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     struct io_comp_state *cs);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
static void io_file_put_work(struct work_struct *work);

static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
			  REQ_F_INFLIGHT))
		__io_clean_op(req);
}

static void io_sq_thread_drop_mm_files(void)
{
	struct files_struct *files = current->files;
	struct mm_struct *mm = current->mm;

	if (mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
		current->mm = NULL;
	}
	if (files) {
		struct nsproxy *nsproxy = current->nsproxy;

		task_lock(current);
		current->files = NULL;
		current->nsproxy = NULL;
		task_unlock(current);
		put_files_struct(files);
		put_nsproxy(nsproxy);
	}
}

static void __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
{
	if (!current->files) {
		struct files_struct *files;
		struct nsproxy *nsproxy;

		task_lock(ctx->sqo_task);
		files = ctx->sqo_task->files;
		if (!files) {
			task_unlock(ctx->sqo_task);
			return;
		}
		atomic_inc(&files->count);
		get_nsproxy(ctx->sqo_task->nsproxy);
		nsproxy = ctx->sqo_task->nsproxy;
		task_unlock(ctx->sqo_task);

		task_lock(current);
		current->files = files;
		current->nsproxy = nsproxy;
		task_unlock(current);
	}
}

static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
	struct mm_struct *mm;

	if (current->mm)
		return 0;

	/* Should never happen */
	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
		return -EFAULT;

	task_lock(ctx->sqo_task);
	mm = ctx->sqo_task->mm;
	if (unlikely(!mm || !mmget_not_zero(mm)))
		mm = NULL;
	task_unlock(ctx->sqo_task);

	if (mm) {
		kthread_use_mm(mm);
		return 0;
	}

	return -EFAULT;
}

static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
					 struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (def->work_flags & IO_WQ_WORK_MM) {
		int ret = __io_sq_thread_acquire_mm(ctx);
		if (unlikely(ret))
			return ret;
	}

	if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES))
		__io_sq_thread_acquire_files(ctx);

	return 0;
}

static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
					 struct cgroup_subsys_state **cur_css)
{
#ifdef CONFIG_BLK_CGROUP
	/* puts the old one when swapping */
	if (*cur_css != ctx->sqo_blkcg_css) {
		kthread_associate_blkcg(ctx->sqo_blkcg_css);
		*cur_css = ctx->sqo_blkcg_css;
	}
#endif
}

static void io_sq_thread_unassociate_blkcg(void)
{
#ifdef CONFIG_BLK_CGROUP
	kthread_associate_blkcg(NULL);
#endif
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001144
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001145/*
Jens Axboe1e6fa522020-10-15 08:46:24 -06001146 * None of these are dereferenced, they are simply used to check if any of
1147 * them have changed. If we're under current and check they are still the
1148 * same, we're fine to grab references to them for actual out-of-line use.
1149 */
1150static void io_init_identity(struct io_identity *id)
1151{
1152 id->files = current->files;
1153 id->mm = current->mm;
1154#ifdef CONFIG_BLK_CGROUP
1155 rcu_read_lock();
1156 id->blkcg_css = blkcg_css();
1157 rcu_read_unlock();
1158#endif
1159 id->creds = current_cred();
1160 id->nsproxy = current->nsproxy;
1161 id->fs = current->fs;
1162 id->fsize = rlimit(RLIMIT_FSIZE);
Jens Axboe4ea33a92020-10-15 13:46:44 -06001163#ifdef CONFIG_AUDIT
1164 id->loginuid = current->loginuid;
1165 id->sessionid = current->sessionid;
1166#endif
Jens Axboe1e6fa522020-10-15 08:46:24 -06001167 refcount_set(&id->count, 1);
1168}
1169
Pavel Begunkovec99ca62020-10-18 10:17:38 +01001170static inline void __io_req_init_async(struct io_kiocb *req)
1171{
1172 memset(&req->work, 0, sizeof(req->work));
1173 req->flags |= REQ_F_WORK_INITIALIZED;
1174}
1175
Jens Axboe1e6fa522020-10-15 08:46:24 -06001176/*
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001177 * Note: must call io_req_init_async() for the first time you
1178 * touch any members of io_wq_work.
1179 */
1180static inline void io_req_init_async(struct io_kiocb *req)
1181{
Jens Axboe500a3732020-10-15 17:38:03 -06001182 struct io_uring_task *tctx = current->io_uring;
1183
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001184 if (req->flags & REQ_F_WORK_INITIALIZED)
1185 return;
1186
Pavel Begunkovec99ca62020-10-18 10:17:38 +01001187 __io_req_init_async(req);
Jens Axboe500a3732020-10-15 17:38:03 -06001188
1189 /* Grab a ref if this isn't our static identity */
1190 req->work.identity = tctx->identity;
1191 if (tctx->identity != &tctx->__identity)
1192 refcount_inc(&req->work.identity->count);
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001193}
1194
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03001195static inline bool io_async_submit(struct io_ring_ctx *ctx)
1196{
1197 return ctx->flags & IORING_SETUP_SQPOLL;
1198}
1199
Jens Axboe2b188cc2019-01-07 10:46:33 -07001200static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1201{
1202 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1203
Jens Axboe0f158b42020-05-14 17:18:39 -06001204 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001205}
1206
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001207static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1208{
1209 return !req->timeout.off;
1210}
1211
Jens Axboe2b188cc2019-01-07 10:46:33 -07001212static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1213{
1214 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001215 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001216
1217 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1218 if (!ctx)
1219 return NULL;
1220
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001221 ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
1222 if (!ctx->fallback_req)
1223 goto err;
1224
Jens Axboe78076bb2019-12-04 19:56:40 -07001225 /*
1226 * Use 5 bits less than the max cq entries, that should give us around
1227 * 32 entries per hash list if totally full and uniformly spread.
1228 */
1229 hash_bits = ilog2(p->cq_entries);
1230 hash_bits -= 5;
1231 if (hash_bits <= 0)
1232 hash_bits = 1;
1233 ctx->cancel_hash_bits = hash_bits;
1234 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1235 GFP_KERNEL);
1236 if (!ctx->cancel_hash)
1237 goto err;
1238 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1239
Roman Gushchin21482892019-05-07 10:01:48 -07001240 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001241 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1242 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001243
1244 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001245 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001246 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001247 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001248 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001249 init_completion(&ctx->ref_comp);
1250 init_completion(&ctx->sq_thread_comp);
Jens Axboe5a2e7452020-02-23 16:23:11 -07001251 idr_init(&ctx->io_buffer_idr);
Jens Axboe071698e2020-01-28 10:04:42 -07001252 idr_init(&ctx->personality_idr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001253 mutex_init(&ctx->uring_lock);
1254 init_waitqueue_head(&ctx->wait);
1255 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001256 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001257 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001258 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -06001259 init_waitqueue_head(&ctx->inflight_wait);
1260 spin_lock_init(&ctx->inflight_lock);
1261 INIT_LIST_HEAD(&ctx->inflight_list);
Jens Axboe4a38aed22020-05-14 17:21:15 -06001262 INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
1263 init_llist_head(&ctx->file_put_llist);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001264 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001265err:
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001266 if (ctx->fallback_req)
1267 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe78076bb2019-12-04 19:56:40 -07001268 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001269 kfree(ctx);
1270 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001271}
1272
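/*
 * Only requests marked IOSQE_IO_DRAIN are ever deferred: keep deferring
 * until the completion count (CQ tail plus overflowed CQEs) has caught up
 * with the sequence this request was assigned at submission time.
 */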
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001273static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001274{
Jens Axboe2bc99302020-07-09 09:43:27 -06001275 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1276 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001277
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001278 return seq != ctx->cached_cq_tail
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001279 + READ_ONCE(ctx->cached_cq_overflow);
Jens Axboe2bc99302020-07-09 09:43:27 -06001280 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001281
Bob Liu9d858b22019-11-13 18:06:25 +08001282 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001283}
1284
Jens Axboede0617e2019-04-06 21:51:27 -06001285static void __io_commit_cqring(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001286{
Hristo Venev75b28af2019-08-26 17:23:46 +00001287 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001288
Pavel Begunkov07910152020-01-17 03:52:46 +03001289 /* order cqe stores with ring update */
1290 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001291
Pavel Begunkov07910152020-01-17 03:52:46 +03001292 if (wq_has_sleeper(&ctx->cq_wait)) {
1293 wake_up_interruptible(&ctx->cq_wait);
1294 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001295 }
1296}
1297
Jens Axboe5c3462c2020-10-15 09:02:33 -06001298static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
Jens Axboe1e6fa522020-10-15 08:46:24 -06001299{
Jens Axboe500a3732020-10-15 17:38:03 -06001300 if (req->work.identity == &tctx->__identity)
Jens Axboe1e6fa522020-10-15 08:46:24 -06001301 return;
1302 if (refcount_dec_and_test(&req->work.identity->count))
1303 kfree(req->work.identity);
1304}
1305
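/*
 * Undo the work setup done for async execution: drop the mm, blkcg css,
 * creds and fs references taken for ->work, then drop the identity itself.
 */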
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001306static void io_req_clean_work(struct io_kiocb *req)
Jens Axboecccf0ee2020-01-27 16:34:48 -07001307{
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001308 if (!(req->flags & REQ_F_WORK_INITIALIZED))
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001309 return;
Jens Axboe51a4cc12020-08-10 10:55:56 -06001310
1311 req->flags &= ~REQ_F_WORK_INITIALIZED;
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001312
Jens Axboedfead8a2020-10-14 10:12:37 -06001313 if (req->work.flags & IO_WQ_WORK_MM) {
Jens Axboe98447d62020-10-14 10:48:51 -06001314 mmdrop(req->work.identity->mm);
Jens Axboedfead8a2020-10-14 10:12:37 -06001315 req->work.flags &= ~IO_WQ_WORK_MM;
Jens Axboecccf0ee2020-01-27 16:34:48 -07001316 }
Dennis Zhou91d8f512020-09-16 13:41:05 -07001317#ifdef CONFIG_BLK_CGROUP
Jens Axboedfead8a2020-10-14 10:12:37 -06001318 if (req->work.flags & IO_WQ_WORK_BLKCG) {
Jens Axboe98447d62020-10-14 10:48:51 -06001319 css_put(req->work.identity->blkcg_css);
Jens Axboedfead8a2020-10-14 10:12:37 -06001320 req->work.flags &= ~IO_WQ_WORK_BLKCG;
Jens Axboecccf0ee2020-01-27 16:34:48 -07001321 }
Jens Axboedfead8a2020-10-14 10:12:37 -06001322#endif
1323 if (req->work.flags & IO_WQ_WORK_CREDS) {
Jens Axboe98447d62020-10-14 10:48:51 -06001324 put_cred(req->work.identity->creds);
Jens Axboedfead8a2020-10-14 10:12:37 -06001325 req->work.flags &= ~IO_WQ_WORK_CREDS;
1326 }
1327 if (req->work.flags & IO_WQ_WORK_FS) {
Jens Axboe98447d62020-10-14 10:48:51 -06001328 struct fs_struct *fs = req->work.identity->fs;
Jens Axboeff002b32020-02-07 16:05:21 -07001329
Jens Axboe98447d62020-10-14 10:48:51 -06001330 spin_lock(&req->work.identity->fs->lock);
Jens Axboeff002b32020-02-07 16:05:21 -07001331 if (--fs->users)
1332 fs = NULL;
Jens Axboe98447d62020-10-14 10:48:51 -06001333 spin_unlock(&req->work.identity->fs->lock);
Jens Axboeff002b32020-02-07 16:05:21 -07001334 if (fs)
1335 free_fs_struct(fs);
Jens Axboedfead8a2020-10-14 10:12:37 -06001336 req->work.flags &= ~IO_WQ_WORK_FS;
Jens Axboeff002b32020-02-07 16:05:21 -07001337 }
Jens Axboe51a4cc12020-08-10 10:55:56 -06001338
Jens Axboe5c3462c2020-10-15 09:02:33 -06001339 io_put_identity(req->task->io_uring, req);
Jens Axboe1e6fa522020-10-15 08:46:24 -06001340}
1341
1342/*
1343 * Create a private copy of io_identity, since some fields don't match
1344 * the current context.
1345 */
1346static bool io_identity_cow(struct io_kiocb *req)
1347{
Jens Axboe5c3462c2020-10-15 09:02:33 -06001348 struct io_uring_task *tctx = current->io_uring;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001349 const struct cred *creds = NULL;
1350 struct io_identity *id;
1351
1352 if (req->work.flags & IO_WQ_WORK_CREDS)
1353 creds = req->work.identity->creds;
1354
1355 id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
1356 if (unlikely(!id)) {
1357 req->work.flags |= IO_WQ_WORK_CANCEL;
1358 return false;
1359 }
1360
1361 /*
1362 * We can safely just re-init the creds we copied. Either the field
1363 * matches the current one, or we haven't grabbed it yet. The only
1364 * exception is ->creds, through registered personalities, so handle
1365 * that one separately.
1366 */
1367 io_init_identity(id);
1368 if (creds)
1369 req->work.identity->creds = creds;
1370
1371 /* add one for this request */
1372 refcount_inc(&id->count);
1373
Jens Axboecb8a8ae2020-11-03 12:19:07 -07001374 /* drop tctx and req identity references, if needed */
1375 if (tctx->identity != &tctx->__identity &&
1376 refcount_dec_and_test(&tctx->identity->count))
1377 kfree(tctx->identity);
1378 if (req->work.identity != &tctx->__identity &&
1379 refcount_dec_and_test(&req->work.identity->count))
Jens Axboe1e6fa522020-10-15 08:46:24 -06001380 kfree(req->work.identity);
1381
1382 req->work.identity = id;
Jens Axboe500a3732020-10-15 17:38:03 -06001383 tctx->identity = id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001384 return true;
1385}
1386
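/*
 * Check that the identity fields this opcode needs (fsize, files, blkcg,
 * creds, audit login info, fs) still match the current task, and take
 * references on them for async execution. Returns false if anything has
 * changed, in which case the caller must COW the identity and retry.
 */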
1387static bool io_grab_identity(struct io_kiocb *req)
1388{
1389 const struct io_op_def *def = &io_op_defs[req->opcode];
Jens Axboe5c3462c2020-10-15 09:02:33 -06001390 struct io_identity *id = req->work.identity;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001391 struct io_ring_ctx *ctx = req->ctx;
1392
Jens Axboe69228332020-10-20 14:28:41 -06001393 if (def->work_flags & IO_WQ_WORK_FSIZE) {
1394 if (id->fsize != rlimit(RLIMIT_FSIZE))
1395 return false;
1396 req->work.flags |= IO_WQ_WORK_FSIZE;
1397 }
Jens Axboe1e6fa522020-10-15 08:46:24 -06001398
1399 if (!(req->work.flags & IO_WQ_WORK_FILES) &&
1400 (def->work_flags & IO_WQ_WORK_FILES) &&
1401 !(req->flags & REQ_F_NO_FILE_TABLE)) {
1402 if (id->files != current->files ||
1403 id->nsproxy != current->nsproxy)
1404 return false;
1405 atomic_inc(&id->files->count);
1406 get_nsproxy(id->nsproxy);
1407 req->flags |= REQ_F_INFLIGHT;
1408
1409 spin_lock_irq(&ctx->inflight_lock);
1410 list_add(&req->inflight_entry, &ctx->inflight_list);
1411 spin_unlock_irq(&ctx->inflight_lock);
1412 req->work.flags |= IO_WQ_WORK_FILES;
1413 }
1414#ifdef CONFIG_BLK_CGROUP
1415 if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
1416 (def->work_flags & IO_WQ_WORK_BLKCG)) {
1417 rcu_read_lock();
1418 if (id->blkcg_css != blkcg_css()) {
1419 rcu_read_unlock();
1420 return false;
1421 }
1422 /*
1423 * This should be rare, either the cgroup is dying or the task
1424 * is moving cgroups. Just punt to root for the handful of ios.
1425 */
1426 if (css_tryget_online(id->blkcg_css))
1427 req->work.flags |= IO_WQ_WORK_BLKCG;
1428 rcu_read_unlock();
1429 }
1430#endif
1431 if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
1432 if (id->creds != current_cred())
1433 return false;
1434 get_cred(id->creds);
1435 req->work.flags |= IO_WQ_WORK_CREDS;
1436 }
Jens Axboe4ea33a92020-10-15 13:46:44 -06001437#ifdef CONFIG_AUDIT
1438 if (!uid_eq(current->loginuid, id->loginuid) ||
1439 current->sessionid != id->sessionid)
1440 return false;
1441#endif
Jens Axboe1e6fa522020-10-15 08:46:24 -06001442 if (!(req->work.flags & IO_WQ_WORK_FS) &&
1443 (def->work_flags & IO_WQ_WORK_FS)) {
1444 if (current->fs != id->fs)
1445 return false;
1446 spin_lock(&id->fs->lock);
1447 if (!id->fs->in_exec) {
1448 id->fs->users++;
1449 req->work.flags |= IO_WQ_WORK_FS;
1450 } else {
1451 req->work.flags |= IO_WQ_WORK_CANCEL;
1452 }
1453 spin_unlock(&current->fs->lock);
1454 }
1455
1456 return true;
Jens Axboe561fb042019-10-24 07:25:42 -06001457}
1458
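/*
 * Initialise req->work for punting to io-wq: hash regular-file work where
 * required, mark unbound work, grab an mm reference if the opcode needs
 * one, and grab the task identity (COWing it if the task state changed).
 */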
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001459static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001460{
Jens Axboed3656342019-12-18 09:50:26 -07001461 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001462 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5c3462c2020-10-15 09:02:33 -06001463 struct io_identity *id;
Jens Axboe54a91f32019-09-10 09:15:04 -06001464
Pavel Begunkov16d59802020-07-12 16:16:47 +03001465 io_req_init_async(req);
Jens Axboe5c3462c2020-10-15 09:02:33 -06001466 id = req->work.identity;
Pavel Begunkov16d59802020-07-12 16:16:47 +03001467
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001468 if (req->flags & REQ_F_FORCE_ASYNC)
1469 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1470
Jens Axboed3656342019-12-18 09:50:26 -07001471 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001472 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001473 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboed3656342019-12-18 09:50:26 -07001474 } else {
1475 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001476 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001477 }
Pavel Begunkov23329512020-10-10 18:34:06 +01001478
Jens Axboe1e6fa522020-10-15 08:46:24 -06001479 /* ->mm can never change on us */
Jens Axboedfead8a2020-10-14 10:12:37 -06001480 if (!(req->work.flags & IO_WQ_WORK_MM) &&
1481 (def->work_flags & IO_WQ_WORK_MM)) {
Jens Axboe1e6fa522020-10-15 08:46:24 -06001482 mmgrab(id->mm);
Jens Axboedfead8a2020-10-14 10:12:37 -06001483 req->work.flags |= IO_WQ_WORK_MM;
Pavel Begunkov23329512020-10-10 18:34:06 +01001484 }
Jens Axboe1e6fa522020-10-15 08:46:24 -06001485
1486 /* if we fail grabbing identity, we must COW, regrab, and retry */
1487 if (io_grab_identity(req))
1488 return;
1489
1490 if (!io_identity_cow(req))
1491 return;
1492
1493 /* can't fail at this point */
1494 if (!io_grab_identity(req))
1495 WARN_ON(1);
Jens Axboe561fb042019-10-24 07:25:42 -06001496}
1497
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001498static void io_prep_async_link(struct io_kiocb *req)
1499{
1500 struct io_kiocb *cur;
1501
1502 io_prep_async_work(req);
1503 if (req->flags & REQ_F_LINK_HEAD)
1504 list_for_each_entry(cur, &req->link_list, link_list)
1505 io_prep_async_work(cur);
1506}
1507
Jens Axboe7271ef32020-08-10 09:55:22 -06001508static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001509{
Jackie Liua197f662019-11-08 08:09:12 -07001510 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001511 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe561fb042019-10-24 07:25:42 -06001512
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001513 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1514 &req->work, req->flags);
1515 io_wq_enqueue(ctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001516 return link;
Jens Axboe18d9be12019-09-10 09:13:05 -06001517}
1518
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001519static void io_queue_async_work(struct io_kiocb *req)
1520{
Jens Axboe7271ef32020-08-10 09:55:22 -06001521 struct io_kiocb *link;
1522
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001523 /* init ->work of the whole link before punting */
1524 io_prep_async_link(req);
Jens Axboe7271ef32020-08-10 09:55:22 -06001525 link = __io_queue_async_work(req);
1526
1527 if (link)
1528 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001529}
1530
Jens Axboe5262f562019-09-17 12:26:57 -06001531static void io_kill_timeout(struct io_kiocb *req)
1532{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001533 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001534 int ret;
1535
Jens Axboee8c2bc12020-08-15 18:44:09 -07001536 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe5262f562019-09-17 12:26:57 -06001537 if (ret != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001538 atomic_set(&req->ctx->cq_timeouts,
1539 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001540 list_del_init(&req->timeout.list);
Jens Axboe78e19bb2019-11-06 15:21:34 -07001541 io_cqring_fill_event(req, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001542 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001543 }
1544}
1545
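/*
 * Returns true if @req belongs to @tsk. A NULL @tsk matches any request,
 * and on SQPOLL rings requests owned by the SQ poll thread match as well.
 */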
Jens Axboef3606e32020-09-22 08:18:24 -06001546static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
1547{
1548 struct io_ring_ctx *ctx = req->ctx;
1549
1550 if (!tsk || req->task == tsk)
1551 return true;
Jens Axboe534ca6d2020-09-02 13:52:19 -06001552 if (ctx->flags & IORING_SETUP_SQPOLL) {
1553 if (ctx->sq_data && req->task == ctx->sq_data->thread)
1554 return true;
1555 }
Jens Axboef3606e32020-09-22 08:18:24 -06001556 return false;
1557}
1558
Jens Axboe76e1b642020-09-26 15:05:03 -06001559/*
1560 * Returns true if we found and killed one or more timeouts
1561 */
1562static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
Jens Axboe5262f562019-09-17 12:26:57 -06001563{
1564 struct io_kiocb *req, *tmp;
Jens Axboe76e1b642020-09-26 15:05:03 -06001565 int canceled = 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001566
1567 spin_lock_irq(&ctx->completion_lock);
Jens Axboef3606e32020-09-22 08:18:24 -06001568 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Jens Axboe76e1b642020-09-26 15:05:03 -06001569 if (io_task_match(req, tsk)) {
Jens Axboef3606e32020-09-22 08:18:24 -06001570 io_kill_timeout(req);
Jens Axboe76e1b642020-09-26 15:05:03 -06001571 canceled++;
1572 }
Jens Axboef3606e32020-09-22 08:18:24 -06001573 }
Jens Axboe5262f562019-09-17 12:26:57 -06001574 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe76e1b642020-09-26 15:05:03 -06001575 return canceled != 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001576}
1577
Pavel Begunkov04518942020-05-26 20:34:05 +03001578static void __io_queue_deferred(struct io_ring_ctx *ctx)
1579{
1580 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001581 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1582 struct io_defer_entry, list);
Jens Axboe7271ef32020-08-10 09:55:22 -06001583 struct io_kiocb *link;
Pavel Begunkov04518942020-05-26 20:34:05 +03001584
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001585 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001586 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001587 list_del_init(&de->list);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001588 /* punt-init is done before queueing for defer */
Jens Axboe7271ef32020-08-10 09:55:22 -06001589 link = __io_queue_async_work(de->req);
1590 if (link) {
1591 __io_queue_linked_timeout(link);
1592 /* drop submission reference */
Pavel Begunkov216578e2020-10-13 09:44:00 +01001593 io_put_req_deferred(link, 1);
Jens Axboe7271ef32020-08-10 09:55:22 -06001594 }
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001595 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001596 } while (!list_empty(&ctx->defer_list));
1597}
1598
Pavel Begunkov360428f2020-05-30 14:54:17 +03001599static void io_flush_timeouts(struct io_ring_ctx *ctx)
1600{
1601 while (!list_empty(&ctx->timeout_list)) {
1602 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001603 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001604
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001605 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001606 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001607 if (req->timeout.target_seq != ctx->cached_cq_tail
1608 - atomic_read(&ctx->cq_timeouts))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001609 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001610
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001611 list_del_init(&req->timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001612 io_kill_timeout(req);
1613 }
1614}
1615
Jens Axboede0617e2019-04-06 21:51:27 -06001616static void io_commit_cqring(struct io_ring_ctx *ctx)
1617{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001618 io_flush_timeouts(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001619 __io_commit_cqring(ctx);
1620
Pavel Begunkov04518942020-05-26 20:34:05 +03001621 if (unlikely(!list_empty(&ctx->defer_list)))
1622 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001623}
1624
Jens Axboe90554202020-09-03 12:12:41 -06001625static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1626{
1627 struct io_rings *r = ctx->rings;
1628
1629 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1630}
1631
Jens Axboe2b188cc2019-01-07 10:46:33 -07001632static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1633{
Hristo Venev75b28af2019-08-26 17:23:46 +00001634 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001635 unsigned tail;
1636
1637 tail = ctx->cached_cq_tail;
Stefan Bühler115e12e2019-04-24 23:54:18 +02001638 /*
1639 * writes to the cq entry need to come after reading head; the
1640 * control dependency is enough as we're using WRITE_ONCE to
1641 * fill the cq entry
1642 */
Hristo Venev75b28af2019-08-26 17:23:46 +00001643 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001644 return NULL;
1645
1646 ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001647 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001648}
1649
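/*
 * Only signal the registered eventfd if one exists and it hasn't been
 * disabled via IORING_CQ_EVENTFD_DISABLED. If it was registered as async,
 * only completions posted from io-wq worker context trigger it.
 */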
Jens Axboef2842ab2020-01-08 11:04:00 -07001650static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1651{
Jens Axboef0b493e2020-02-01 21:30:11 -07001652 if (!ctx->cq_ev_fd)
1653 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001654 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1655 return false;
Jens Axboef2842ab2020-01-08 11:04:00 -07001656 if (!ctx->eventfd_async)
1657 return true;
Jens Axboeb41e9852020-02-17 09:52:41 -07001658 return io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001659}
1660
Jens Axboeb41e9852020-02-17 09:52:41 -07001661static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001662{
1663 if (waitqueue_active(&ctx->wait))
1664 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001665 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1666 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001667 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001668 eventfd_signal(ctx->cq_ev_fd, 1);
Jens Axboe8c838782019-03-12 15:48:16 -06001669}
1670
Pavel Begunkov46930142020-07-30 18:43:49 +03001671static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
1672{
1673 if (list_empty(&ctx->cq_overflow_list)) {
1674 clear_bit(0, &ctx->sq_check_overflow);
1675 clear_bit(0, &ctx->cq_check_overflow);
1676 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1677 }
1678}
1679
Pavel Begunkov99b32802020-11-04 13:39:31 +00001680static inline bool __io_match_files(struct io_kiocb *req,
1681 struct files_struct *files)
Jens Axboee6c8aa92020-09-28 13:10:13 -06001682{
Pavel Begunkov99b32802020-11-04 13:39:31 +00001683 return ((req->flags & REQ_F_WORK_INITIALIZED) &&
1684 (req->work.flags & IO_WQ_WORK_FILES)) &&
1685 req->work.identity->files == files;
1686}
1687
1688static bool io_match_files(struct io_kiocb *req,
1689 struct files_struct *files)
1690{
1691 struct io_kiocb *link;
1692
Jens Axboee6c8aa92020-09-28 13:10:13 -06001693 if (!files)
1694 return true;
Pavel Begunkov99b32802020-11-04 13:39:31 +00001695 if (__io_match_files(req, files))
1696 return true;
1697 if (req->flags & REQ_F_LINK_HEAD) {
1698 list_for_each_entry(link, &req->link_list, link_list) {
1699 if (__io_match_files(link, files))
1700 return true;
1701 }
1702 }
Jens Axboee6c8aa92020-09-28 13:10:13 -06001703 return false;
1704}
1705
Jens Axboec4a2ed72019-11-21 21:01:26 -07001706/* Returns true if there are no backlogged entries after the flush */
Jens Axboee6c8aa92020-09-28 13:10:13 -06001707static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1708 struct task_struct *tsk,
1709 struct files_struct *files)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001710{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001711 struct io_rings *rings = ctx->rings;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001712 struct io_kiocb *req, *tmp;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001713 struct io_uring_cqe *cqe;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001714 unsigned long flags;
1715 LIST_HEAD(list);
1716
1717 if (!force) {
1718 if (list_empty_careful(&ctx->cq_overflow_list))
Jens Axboec4a2ed72019-11-21 21:01:26 -07001719 return true;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001720 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1721 rings->cq_ring_entries))
Jens Axboec4a2ed72019-11-21 21:01:26 -07001722 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001723 }
1724
1725 spin_lock_irqsave(&ctx->completion_lock, flags);
1726
1727 /* if force is set, the ring is going away. always drop after that */
1728 if (force)
Jens Axboe69b3e542020-01-08 11:01:46 -07001729 ctx->cq_overflow_flushed = 1;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001730
Jens Axboec4a2ed72019-11-21 21:01:26 -07001731 cqe = NULL;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001732 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
1733 if (tsk && req->task != tsk)
1734 continue;
1735 if (!io_match_files(req, files))
1736 continue;
1737
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001738 cqe = io_get_cqring(ctx);
1739 if (!cqe && !force)
1740 break;
1741
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001742 list_move(&req->compl.list, &list);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001743 if (cqe) {
1744 WRITE_ONCE(cqe->user_data, req->user_data);
1745 WRITE_ONCE(cqe->res, req->result);
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001746 WRITE_ONCE(cqe->flags, req->compl.cflags);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001747 } else {
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001748 ctx->cached_cq_overflow++;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001749 WRITE_ONCE(ctx->rings->cq_overflow,
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001750 ctx->cached_cq_overflow);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001751 }
1752 }
1753
1754 io_commit_cqring(ctx);
Pavel Begunkov46930142020-07-30 18:43:49 +03001755 io_cqring_mark_overflow(ctx);
1756
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001757 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1758 io_cqring_ev_posted(ctx);
1759
1760 while (!list_empty(&list)) {
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001761 req = list_first_entry(&list, struct io_kiocb, compl.list);
1762 list_del(&req->compl.list);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001763 io_put_req(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001764 }
Jens Axboec4a2ed72019-11-21 21:01:26 -07001765
1766 return cqe != NULL;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001767}
1768
Jens Axboebcda7ba2020-02-23 16:42:51 -07001769static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001770{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001771 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001772 struct io_uring_cqe *cqe;
1773
Jens Axboe78e19bb2019-11-06 15:21:34 -07001774 trace_io_uring_complete(ctx, req->user_data, res);
Jens Axboe51c3ff62019-11-03 06:52:50 -07001775
Jens Axboe2b188cc2019-01-07 10:46:33 -07001776 /*
1777 * If we can't get a cq entry, userspace overflowed the
1778 * submission (by quite a lot). Increment the overflow count in
1779 * the ring.
1780 */
1781 cqe = io_get_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001782 if (likely(cqe)) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001783 WRITE_ONCE(cqe->user_data, req->user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001784 WRITE_ONCE(cqe->res, res);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001785 WRITE_ONCE(cqe->flags, cflags);
Jens Axboefdaf0832020-10-30 09:37:30 -06001786 } else if (ctx->cq_overflow_flushed ||
1787 atomic_read(&req->task->io_uring->in_idle)) {
Jens Axboe0f212202020-09-13 13:09:39 -06001788 /*
1789 * If we're in ring overflow flush mode, or in task cancel mode,
1790 * then we cannot store the request for later flushing, we need
1791 * to drop it on the floor.
1792 */
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001793 ctx->cached_cq_overflow++;
1794 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001795 } else {
Jens Axboead3eb2c2019-12-18 17:12:20 -07001796 if (list_empty(&ctx->cq_overflow_list)) {
1797 set_bit(0, &ctx->sq_check_overflow);
1798 set_bit(0, &ctx->cq_check_overflow);
Xiaoguang Wang6d5f9042020-07-09 09:15:29 +08001799 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
Jens Axboead3eb2c2019-12-18 17:12:20 -07001800 }
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001801 io_clean_op(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001802 req->result = res;
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001803 req->compl.cflags = cflags;
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001804 refcount_inc(&req->refs);
1805 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001806 }
1807}
1808
Jens Axboebcda7ba2020-02-23 16:42:51 -07001809static void io_cqring_fill_event(struct io_kiocb *req, long res)
1810{
1811 __io_cqring_fill_event(req, res, 0);
1812}
1813
Jens Axboee1e16092020-06-22 09:17:17 -06001814static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001815{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001816 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001817 unsigned long flags;
1818
1819 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001820 __io_cqring_fill_event(req, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001821 io_commit_cqring(ctx);
1822 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1823
Jens Axboe8c838782019-03-12 15:48:16 -06001824 io_cqring_ev_posted(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001825}
1826
Jens Axboe229a7b62020-06-22 10:13:11 -06001827static void io_submit_flush_completions(struct io_comp_state *cs)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001828{
Jens Axboe229a7b62020-06-22 10:13:11 -06001829 struct io_ring_ctx *ctx = cs->ctx;
1830
1831 spin_lock_irq(&ctx->completion_lock);
1832 while (!list_empty(&cs->list)) {
1833 struct io_kiocb *req;
1834
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001835 req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
1836 list_del(&req->compl.list);
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001837 __io_cqring_fill_event(req, req->result, req->compl.cflags);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001838
1839 /*
1840 * io_free_req() doesn't care about completion_lock unless one
1841 * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
1842 * because of a potential deadlock with req->work.fs->lock
1843 */
1844 if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
1845 |REQ_F_WORK_INITIALIZED)) {
Jens Axboe229a7b62020-06-22 10:13:11 -06001846 spin_unlock_irq(&ctx->completion_lock);
1847 io_put_req(req);
1848 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001849 } else {
1850 io_put_req(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06001851 }
1852 }
1853 io_commit_cqring(ctx);
1854 spin_unlock_irq(&ctx->completion_lock);
1855
1856 io_cqring_ev_posted(ctx);
1857 cs->nr = 0;
1858}
1859
1860static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
1861 struct io_comp_state *cs)
1862{
1863 if (!cs) {
1864 io_cqring_add_event(req, res, cflags);
1865 io_put_req(req);
1866 } else {
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001867 io_clean_op(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06001868 req->result = res;
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001869 req->compl.cflags = cflags;
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001870 list_add_tail(&req->compl.list, &cs->list);
Jens Axboe229a7b62020-06-22 10:13:11 -06001871 if (++cs->nr >= 32)
1872 io_submit_flush_completions(cs);
1873 }
Jens Axboee1e16092020-06-22 09:17:17 -06001874}
1875
1876static void io_req_complete(struct io_kiocb *req, long res)
1877{
Jens Axboe229a7b62020-06-22 10:13:11 -06001878 __io_req_complete(req, res, 0, NULL);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001879}
1880
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001881static inline bool io_is_fallback_req(struct io_kiocb *req)
1882{
1883 return req == (struct io_kiocb *)
1884 ((unsigned long) req->ctx->fallback_req & ~1UL);
1885}
1886
1887static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1888{
1889 struct io_kiocb *req;
1890
1891 req = ctx->fallback_req;
Bijan Mottahedehdd461af2020-04-29 17:47:50 -07001892 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001893 return req;
1894
1895 return NULL;
1896}
1897
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03001898static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1899 struct io_submit_state *state)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001900{
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001901 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001902 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001903 size_t sz;
1904 int ret;
1905
1906 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
Jens Axboefd6fab22019-03-14 16:30:06 -06001907 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1908
1909 /*
1910 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1911 * retry single alloc to be on the safe side.
1912 */
1913 if (unlikely(ret <= 0)) {
1914 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1915 if (!state->reqs[0])
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001916 goto fallback;
Jens Axboefd6fab22019-03-14 16:30:06 -06001917 ret = 1;
1918 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001919 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001920 }
1921
Pavel Begunkov291b2822020-09-30 22:57:01 +03001922 state->free_reqs--;
1923 return state->reqs[state->free_reqs];
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001924fallback:
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03001925 return io_get_fallback_req(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001926}
1927
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001928static inline void io_put_file(struct io_kiocb *req, struct file *file,
1929 bool fixed)
1930{
1931 if (fixed)
Xiaoguang Wang05589552020-03-31 14:05:18 +08001932 percpu_ref_put(req->fixed_file_refs);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001933 else
1934 fput(file);
1935}
1936
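/*
 * Release what the request holds before it is freed: any allocated async
 * data, the file reference, and the work state grabbed for async execution.
 */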
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001937static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001938{
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001939 io_clean_op(req);
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001940
Jens Axboee8c2bc12020-08-15 18:44:09 -07001941 if (req->async_data)
1942 kfree(req->async_data);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001943 if (req->file)
1944 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
Jens Axboefcb323c2019-10-24 12:39:47 -06001945
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001946 io_req_clean_work(req);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001947}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001948
Pavel Begunkov216578e2020-10-13 09:44:00 +01001949static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001950{
Jens Axboe0f212202020-09-13 13:09:39 -06001951 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe51a4cc12020-08-10 10:55:56 -06001952 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001953
Pavel Begunkov216578e2020-10-13 09:44:00 +01001954 io_dismantle_req(req);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001955
Jens Axboed8a6df12020-10-15 16:24:45 -06001956 percpu_counter_dec(&tctx->inflight);
Jens Axboefdaf0832020-10-30 09:37:30 -06001957 if (atomic_read(&tctx->in_idle))
Jens Axboe0f212202020-09-13 13:09:39 -06001958 wake_up(&tctx->wait);
Jens Axboee3bc8e92020-09-24 08:45:57 -06001959 put_task_struct(req->task);
1960
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03001961 if (likely(!io_is_fallback_req(req)))
1962 kmem_cache_free(req_cachep, req);
1963 else
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001964 clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
1965 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001966}
1967
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001968static void io_kill_linked_timeout(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001969{
Jackie Liua197f662019-11-08 08:09:12 -07001970 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001971 struct io_kiocb *link;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001972 bool cancelled = false;
1973 unsigned long flags;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001974
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001975 spin_lock_irqsave(&ctx->completion_lock, flags);
1976 link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
1977 link_list);
Pavel Begunkov900fad42020-10-19 16:39:16 +01001978 /*
1979 * Can happen if a linked timeout fired and the link chain looked like
1980 * req -> link t-out -> link t-out [-> ...]
1981 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001982 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1983 struct io_timeout_data *io = link->async_data;
1984 int ret;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001985
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001986 list_del_init(&link->link_list);
1987 ret = hrtimer_try_to_cancel(&io->timer);
1988 if (ret != -1) {
1989 io_cqring_fill_event(link, -ECANCELED);
1990 io_commit_cqring(ctx);
1991 cancelled = true;
1992 }
1993 }
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001994 req->flags &= ~REQ_F_LINK_TIMEOUT;
Pavel Begunkov216578e2020-10-13 09:44:00 +01001995 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeab0b6452020-06-30 08:43:15 -06001996
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001997 if (cancelled) {
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001998 io_cqring_ev_posted(ctx);
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001999 io_put_req(link);
2000 }
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002001}
2002
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002003static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002004{
2005 struct io_kiocb *nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002006
Jens Axboe9e645e112019-05-10 16:07:28 -06002007 /*
2008 * The list should never be empty when we are called here. But it could
2009 * potentially happen if the chain is messed up, so check to be on the
2010 * safe side.
2011 */
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002012 if (unlikely(list_empty(&req->link_list)))
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002013 return NULL;
Jens Axboe94ae5e72019-11-14 19:39:52 -07002014
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002015 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
2016 list_del_init(&req->link_list);
2017 if (!list_empty(&nxt->link_list))
2018 nxt->flags |= REQ_F_LINK_HEAD;
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002019 return nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06002020}
2021
2022/*
Pavel Begunkovdea3b492020-04-12 02:05:04 +03002023 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
Jens Axboe9e645e112019-05-10 16:07:28 -06002024 */
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002025static void io_fail_links(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002026{
Jens Axboe2665abf2019-11-05 12:40:47 -07002027 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002028 unsigned long flags;
Jens Axboe9e645e112019-05-10 16:07:28 -06002029
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002030 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboe9e645e112019-05-10 16:07:28 -06002031 while (!list_empty(&req->link_list)) {
Pavel Begunkov44932332019-12-05 16:16:35 +03002032 struct io_kiocb *link = list_first_entry(&req->link_list,
2033 struct io_kiocb, link_list);
Jens Axboe9e645e112019-05-10 16:07:28 -06002034
Pavel Begunkov44932332019-12-05 16:16:35 +03002035 list_del_init(&link->link_list);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002036 trace_io_uring_fail_link(req, link);
Jens Axboe2665abf2019-11-05 12:40:47 -07002037
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002038 io_cqring_fill_event(link, -ECANCELED);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002039
2040 /*
2041 * It's ok to free under spinlock as they're not linked anymore,
2042 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
2043 * work.fs->lock.
2044 */
2045 if (link->flags & REQ_F_WORK_INITIALIZED)
2046 io_put_req_deferred(link, 2);
2047 else
2048 io_double_put_req(link);
Jens Axboe9e645e112019-05-10 16:07:28 -06002049 }
Jens Axboe2665abf2019-11-05 12:40:47 -07002050
2051 io_commit_cqring(ctx);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002052 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002053
Jens Axboe2665abf2019-11-05 12:40:47 -07002054 io_cqring_ev_posted(ctx);
Jens Axboe9e645e112019-05-10 16:07:28 -06002055}
2056
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002057static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002058{
Pavel Begunkov9b0d9112020-06-28 12:52:34 +03002059 req->flags &= ~REQ_F_LINK_HEAD;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002060 if (req->flags & REQ_F_LINK_TIMEOUT)
2061 io_kill_linked_timeout(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07002062
Jens Axboe9e645e112019-05-10 16:07:28 -06002063 /*
2064 * If LINK is set, we have dependent requests in this chain. If we
2065 * didn't fail this request, queue the first one up, moving any other
2066 * dependencies to the next request. In case of failure, fail the rest
2067 * of the chain.
2068 */
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002069 if (likely(!(req->flags & REQ_F_FAIL_LINK)))
2070 return io_req_link_next(req);
2071 io_fail_links(req);
2072 return NULL;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002073}
Jens Axboe2665abf2019-11-05 12:40:47 -07002074
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002075static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2076{
2077 if (likely(!(req->flags & REQ_F_LINK_HEAD)))
2078 return NULL;
2079 return __io_req_find_next(req);
2080}
2081
Jens Axboe87c43112020-09-30 21:00:14 -06002082static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
Jens Axboec2c4c832020-07-01 15:37:11 -06002083{
2084 struct task_struct *tsk = req->task;
2085 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe91989c72020-10-16 09:02:26 -06002086 enum task_work_notify_mode notify;
2087 int ret;
Jens Axboec2c4c832020-07-01 15:37:11 -06002088
Jens Axboe6200b0a2020-09-13 14:38:30 -06002089 if (tsk->flags & PF_EXITING)
2090 return -ESRCH;
2091
Jens Axboec2c4c832020-07-01 15:37:11 -06002092 /*
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06002093 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2094 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2095 * processing task_work. There's no reliable way to tell if TWA_RESUME
2096 * will do the job.
Jens Axboec2c4c832020-07-01 15:37:11 -06002097 */
Jens Axboe91989c72020-10-16 09:02:26 -06002098 notify = TWA_NONE;
Jens Axboefd7d6de2020-08-23 11:00:37 -06002099 if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
Jens Axboec2c4c832020-07-01 15:37:11 -06002100 notify = TWA_SIGNAL;
2101
Jens Axboe87c43112020-09-30 21:00:14 -06002102 ret = task_work_add(tsk, &req->task_work, notify);
Jens Axboec2c4c832020-07-01 15:37:11 -06002103 if (!ret)
2104 wake_up_process(tsk);
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06002105
Jens Axboec2c4c832020-07-01 15:37:11 -06002106 return ret;
2107}
2108
Jens Axboec40f6372020-06-25 15:39:59 -06002109static void __io_req_task_cancel(struct io_kiocb *req, int error)
2110{
2111 struct io_ring_ctx *ctx = req->ctx;
2112
2113 spin_lock_irq(&ctx->completion_lock);
2114 io_cqring_fill_event(req, error);
2115 io_commit_cqring(ctx);
2116 spin_unlock_irq(&ctx->completion_lock);
2117
2118 io_cqring_ev_posted(ctx);
2119 req_set_fail_links(req);
2120 io_double_put_req(req);
2121}
2122
2123static void io_req_task_cancel(struct callback_head *cb)
2124{
2125 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002126 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002127
2128 __io_req_task_cancel(req, -ECANCELED);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002129 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002130}
2131
2132static void __io_req_task_submit(struct io_kiocb *req)
2133{
2134 struct io_ring_ctx *ctx = req->ctx;
2135
Jens Axboec40f6372020-06-25 15:39:59 -06002136 if (!__io_sq_thread_acquire_mm(ctx)) {
Jens Axboe28cea78a2020-09-14 10:51:17 -06002137 __io_sq_thread_acquire_files(ctx);
Jens Axboec40f6372020-06-25 15:39:59 -06002138 mutex_lock(&ctx->uring_lock);
Pavel Begunkovc1379e22020-09-30 22:57:56 +03002139 __io_queue_sqe(req, NULL);
Jens Axboec40f6372020-06-25 15:39:59 -06002140 mutex_unlock(&ctx->uring_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07002141 } else {
Jens Axboec40f6372020-06-25 15:39:59 -06002142 __io_req_task_cancel(req, -EFAULT);
Jens Axboe2665abf2019-11-05 12:40:47 -07002143 }
Jens Axboe9e645e112019-05-10 16:07:28 -06002144}
2145
Jens Axboec40f6372020-06-25 15:39:59 -06002146static void io_req_task_submit(struct callback_head *cb)
2147{
2148 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06002149 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002150
2151 __io_req_task_submit(req);
Jens Axboe6d816e02020-08-11 08:04:14 -06002152 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002153}
2154
2155static void io_req_task_queue(struct io_kiocb *req)
2156{
Jens Axboec40f6372020-06-25 15:39:59 -06002157 int ret;
2158
2159 init_task_work(&req->task_work, io_req_task_submit);
Jens Axboe6d816e02020-08-11 08:04:14 -06002160 percpu_ref_get(&req->ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002161
Jens Axboe87c43112020-09-30 21:00:14 -06002162 ret = io_req_task_work_add(req, true);
Jens Axboec40f6372020-06-25 15:39:59 -06002163 if (unlikely(ret)) {
Jens Axboec2c4c832020-07-01 15:37:11 -06002164 struct task_struct *tsk;
2165
Jens Axboec40f6372020-06-25 15:39:59 -06002166 init_task_work(&req->task_work, io_req_task_cancel);
2167 tsk = io_wq_get_task(req->ctx->io_wq);
Jens Axboe91989c72020-10-16 09:02:26 -06002168 task_work_add(tsk, &req->task_work, TWA_NONE);
Jens Axboec2c4c832020-07-01 15:37:11 -06002169 wake_up_process(tsk);
Jens Axboec40f6372020-06-25 15:39:59 -06002170 }
Jens Axboec40f6372020-06-25 15:39:59 -06002171}
2172
Pavel Begunkovc3524382020-06-28 12:52:32 +03002173static void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002174{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002175 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002176
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002177 if (nxt)
2178 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002179}
2180
Jens Axboe9e645e112019-05-10 16:07:28 -06002181static void io_free_req(struct io_kiocb *req)
2182{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002183 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002184 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002185}
2186
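/*
 * Batch state for freeing completed requests: requests are returned to the
 * slab cache in bulk, and per-task inflight counts and task references are
 * dropped in bulk per task rather than once per request.
 */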
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002187struct req_batch {
2188 void *reqs[IO_IOPOLL_BATCH];
2189 int to_free;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002190
2191 struct task_struct *task;
2192 int task_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002193};
2194
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002195static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002196{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002197 rb->to_free = 0;
2198 rb->task_refs = 0;
2199 rb->task = NULL;
2200}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002201
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002202static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
2203 struct req_batch *rb)
2204{
2205 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
2206 percpu_ref_put_many(&ctx->refs, rb->to_free);
2207 rb->to_free = 0;
2208}
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002209
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002210static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2211 struct req_batch *rb)
2212{
2213 if (rb->to_free)
2214 __io_req_free_batch_flush(ctx, rb);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002215 if (rb->task) {
Jens Axboed8a6df12020-10-15 16:24:45 -06002216 struct io_uring_task *tctx = rb->task->io_uring;
2217
2218 percpu_counter_sub(&tctx->inflight, rb->task_refs);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002219 put_task_struct_many(rb->task, rb->task_refs);
2220 rb->task = NULL;
2221 }
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002222}
2223
2224static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
2225{
2226 if (unlikely(io_is_fallback_req(req))) {
2227 io_free_req(req);
2228 return;
2229 }
2230 if (req->flags & REQ_F_LINK_HEAD)
2231 io_queue_next(req);
2232
Jens Axboee3bc8e92020-09-24 08:45:57 -06002233 if (req->task != rb->task) {
Jens Axboe0f212202020-09-13 13:09:39 -06002234 if (rb->task) {
Jens Axboed8a6df12020-10-15 16:24:45 -06002235 struct io_uring_task *tctx = rb->task->io_uring;
2236
2237 percpu_counter_sub(&tctx->inflight, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002238 put_task_struct_many(rb->task, rb->task_refs);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002239 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002240 rb->task = req->task;
2241 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002242 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002243 rb->task_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002244
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002245 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002246 rb->reqs[rb->to_free++] = req;
2247 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
2248 __io_req_free_batch_flush(req->ctx, rb);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002249}
2250
Jens Axboeba816ad2019-09-28 11:36:45 -06002251/*
2252 * Drop reference to request, return next in chain (if there is one) if this
2253 * was the last reference to this request.
2254 */
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002255static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002256{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002257 struct io_kiocb *nxt = NULL;
2258
Jens Axboe2a44f462020-02-25 13:25:41 -07002259 if (refcount_dec_and_test(&req->refs)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002260 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002261 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002262 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002263 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002264}
2265
Jens Axboe2b188cc2019-01-07 10:46:33 -07002266static void io_put_req(struct io_kiocb *req)
2267{
Jens Axboedef596e2019-01-09 08:59:42 -07002268 if (refcount_dec_and_test(&req->refs))
2269 io_free_req(req);
2270}
2271
Pavel Begunkov216578e2020-10-13 09:44:00 +01002272static void io_put_req_deferred_cb(struct callback_head *cb)
2273{
2274 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2275
2276 io_free_req(req);
2277}
2278
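/*
 * The final put may happen in a context where freeing directly isn't safe,
 * e.g. under completion_lock where io_req_clean_work() could deadlock on
 * work.fs->lock, so punt the actual free to task_work.
 */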
2279static void io_free_req_deferred(struct io_kiocb *req)
2280{
2281 int ret;
2282
2283 init_task_work(&req->task_work, io_put_req_deferred_cb);
2284 ret = io_req_task_work_add(req, true);
2285 if (unlikely(ret)) {
2286 struct task_struct *tsk;
2287
2288 tsk = io_wq_get_task(req->ctx->io_wq);
Jens Axboe91989c72020-10-16 09:02:26 -06002289 task_work_add(tsk, &req->task_work, TWA_NONE);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002290 wake_up_process(tsk);
2291 }
2292}
2293
2294static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2295{
2296 if (refcount_sub_and_test(refs, &req->refs))
2297 io_free_req_deferred(req);
2298}
2299
Pavel Begunkovf4db7182020-06-25 18:20:54 +03002300static struct io_wq_work *io_steal_work(struct io_kiocb *req)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002301{
Pavel Begunkov6df1db62020-07-03 22:15:06 +03002302 struct io_kiocb *nxt;
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002303
Pavel Begunkovf4db7182020-06-25 18:20:54 +03002304 /*
2305 * We're called from io-wq context, which owns a ref. So if that's the
2306 * last one, it's safe to steal the next work. False negatives are OK,
2307 * the work will just be re-punted async in io_put_work()
2308 */
2309 if (refcount_read(&req->refs) != 1)
2310 return NULL;
2311
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002312 nxt = io_req_find_next(req);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03002313 return nxt ? &nxt->work : NULL;
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002314}
2315
Jens Axboe978db572019-11-14 22:39:04 -07002316static void io_double_put_req(struct io_kiocb *req)
2317{
2318 /* drop both submit and complete references */
2319 if (refcount_sub_and_test(2, &req->refs))
2320 io_free_req(req);
2321}
2322
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002323static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
Jens Axboea3a0e432019-08-20 11:03:11 -06002324{
Jens Axboe84f97dc2019-11-06 11:27:53 -07002325 struct io_rings *rings = ctx->rings;
2326
Jens Axboead3eb2c2019-12-18 17:12:20 -07002327 if (test_bit(0, &ctx->cq_check_overflow)) {
2328 /*
2329 * noflush == true is from the waitqueue handler; just ensure
2330 * we wake up the task, and the next invocation will flush the
2331 * entries. We cannot safely do it from here.
2332 */
2333 if (noflush && !list_empty(&ctx->cq_overflow_list))
2334 return -1U;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002335
Jens Axboee6c8aa92020-09-28 13:10:13 -06002336 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Jens Axboead3eb2c2019-12-18 17:12:20 -07002337 }
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002338
Jens Axboea3a0e432019-08-20 11:03:11 -06002339 /* See comment at the top of this file */
2340 smp_rmb();
Jens Axboead3eb2c2019-12-18 17:12:20 -07002341 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
Jens Axboea3a0e432019-08-20 11:03:11 -06002342}
2343
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002344static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2345{
2346 struct io_rings *rings = ctx->rings;
2347
2348 /* make sure SQ entry isn't read before tail */
2349 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2350}
2351
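/*
 * Encode the selected buffer ID into the CQE flags (IORING_CQE_F_BUFFER)
 * for the application and free the struct io_buffer.
 */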
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002352static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002353{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002354 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002355
Jens Axboebcda7ba2020-02-23 16:42:51 -07002356 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2357 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002358 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002359 kfree(kbuf);
2360 return cflags;
2361}
2362
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002363static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2364{
2365 struct io_buffer *kbuf;
2366
2367 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2368 return io_put_kbuf(req, kbuf);
2369}
2370
Jens Axboe4c6e2772020-07-01 11:29:10 -06002371static inline bool io_run_task_work(void)
2372{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002373 /*
2374 * Not safe to run on exiting task, and the task_work handling will
2375 * not add work to such a task.
2376 */
2377 if (unlikely(current->flags & PF_EXITING))
2378 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002379 if (current->task_works) {
2380 __set_current_state(TASK_RUNNING);
2381 task_work_run();
2382 return true;
2383 }
2384
2385 return false;
2386}
2387
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002388static void io_iopoll_queue(struct list_head *again)
2389{
2390 struct io_kiocb *req;
2391
2392 do {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002393 req = list_first_entry(again, struct io_kiocb, inflight_entry);
2394 list_del(&req->inflight_entry);
Pavel Begunkov81b68a52020-07-30 18:43:46 +03002395 __io_complete_rw(req, -EAGAIN, 0, NULL);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002396 } while (!list_empty(again));
2397}
2398
Jens Axboedef596e2019-01-09 08:59:42 -07002399/*
2400 * Find and free completed poll iocbs
2401 */
2402static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2403 struct list_head *done)
2404{
Jens Axboe8237e042019-12-28 10:48:22 -07002405 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002406 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002407 LIST_HEAD(again);
2408
2409 /* order with ->result store in io_complete_rw_iopoll() */
2410 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002411
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002412 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002413 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002414 int cflags = 0;
2415
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002416 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002417 if (READ_ONCE(req->result) == -EAGAIN) {
Jens Axboe56450c22020-08-26 18:58:26 -06002418 req->result = 0;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002419 req->iopoll_completed = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002420 list_move_tail(&req->inflight_entry, &again);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002421 continue;
2422 }
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002423 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002424
Jens Axboebcda7ba2020-02-23 16:42:51 -07002425 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002426 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002427
2428 __io_cqring_fill_event(req, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002429 (*nr_events)++;
2430
Pavel Begunkovc3524382020-06-28 12:52:32 +03002431 if (refcount_dec_and_test(&req->refs))
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002432 io_req_free_batch(&rb, req);
Jens Axboedef596e2019-01-09 08:59:42 -07002433 }
Jens Axboedef596e2019-01-09 08:59:42 -07002434
Jens Axboe09bb8392019-03-13 12:39:28 -06002435 io_commit_cqring(ctx);
Xiaoguang Wang32b22442020-03-11 09:26:09 +08002436 if (ctx->flags & IORING_SETUP_SQPOLL)
2437 io_cqring_ev_posted(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002438 io_req_free_batch_finish(ctx, &rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002439
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002440 if (!list_empty(&again))
2441 io_iopoll_queue(&again);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002442}
2443
Jens Axboedef596e2019-01-09 08:59:42 -07002444static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2445 long min)
2446{
2447 struct io_kiocb *req, *tmp;
2448 LIST_HEAD(done);
2449 bool spin;
2450 int ret;
2451
2452 /*
2453 * Only spin for completions if we don't have multiple devices hanging
2454 * off our complete list, and we're under the requested amount.
2455 */
2456 spin = !ctx->poll_multi_file && *nr_events < min;
2457
2458 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002459 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002460 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002461
2462 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002463 * Move completed and retryable entries to our local lists.
2464 * If we find a request that requires polling, break out
2465 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002466 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002467 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002468 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002469 continue;
2470 }
2471 if (!list_empty(&done))
2472 break;
2473
2474 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2475 if (ret < 0)
2476 break;
2477
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002478 /* iopoll may have completed current req */
2479 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002480 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002481
Jens Axboedef596e2019-01-09 08:59:42 -07002482 if (ret && spin)
2483 spin = false;
2484 ret = 0;
2485 }
2486
2487 if (!list_empty(&done))
2488 io_iopoll_complete(ctx, nr_events, &done);
2489
2490 return ret;
2491}
2492
2493/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08002494 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07002495 * non-spinning poll check - we'll still enter the driver poll loop, but only
2496 * as a non-spinning completion check.
2497 */
2498static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2499 long min)
2500{
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002501 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07002502 int ret;
2503
2504 ret = io_do_iopoll(ctx, nr_events, min);
2505 if (ret < 0)
2506 return ret;
Pavel Begunkoveba0a4d2020-07-06 17:59:30 +03002507 if (*nr_events >= min)
Jens Axboedef596e2019-01-09 08:59:42 -07002508 return 0;
2509 }
2510
2511 return 1;
2512}
2513
2514/*
2515 * We can't just wait for polled events to come to us, we have to actively
2516 * find and complete them.
2517 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002518static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002519{
2520 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2521 return;
2522
2523 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002524 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002525 unsigned int nr_events = 0;
2526
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002527 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002528
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002529 /* let it sleep and repeat later if can't complete a request */
2530 if (nr_events == 0)
2531 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002532 /*
2533 * Ensure we allow local-to-the-cpu processing to take place;
2534 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002535 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002536 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002537 if (need_resched()) {
2538 mutex_unlock(&ctx->uring_lock);
2539 cond_resched();
2540 mutex_lock(&ctx->uring_lock);
2541 }
Jens Axboedef596e2019-01-09 08:59:42 -07002542 }
2543 mutex_unlock(&ctx->uring_lock);
2544}
2545
Pavel Begunkov7668b922020-07-07 16:36:21 +03002546static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002547{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002548 unsigned int nr_events = 0;
Jens Axboe2b2ed972019-10-25 10:06:15 -06002549 int iters = 0, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002550
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002551 /*
2552 * We disallow the app entering submit/complete with polling, but we
2553 * still need to lock the ring to prevent racing with polled issue
2554 * that got punted to a workqueue.
2555 */
2556 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002557 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002558 /*
Jens Axboea3a0e432019-08-20 11:03:11 -06002559 * Don't enter poll loop if we already have events pending.
2560 * If we do, we can potentially be spinning for commands that
2561 * already triggered a CQE (eg in error).
2562 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07002563 if (io_cqring_events(ctx, false))
Jens Axboea3a0e432019-08-20 11:03:11 -06002564 break;
2565
2566 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -06002567 * If a submit got punted to a workqueue, we can have the
2568 * application entering polling for a command before it gets
2569 * issued. That app will hold the uring_lock for the duration
2570 * of the poll right here, so we need to take a breather every
2571 * now and then to ensure that the issue has a chance to add
2572 * the poll to the issued list. Otherwise we can spin here
2573 * forever, while the workqueue is stuck trying to acquire the
2574 * very same mutex.
2575 */
2576 if (!(++iters & 7)) {
2577 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002578 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002579 mutex_lock(&ctx->uring_lock);
2580 }
2581
Pavel Begunkov7668b922020-07-07 16:36:21 +03002582 ret = io_iopoll_getevents(ctx, &nr_events, min);
Jens Axboedef596e2019-01-09 08:59:42 -07002583 if (ret <= 0)
2584 break;
2585 ret = 0;
Pavel Begunkov7668b922020-07-07 16:36:21 +03002586 } while (min && !nr_events && !need_resched());
Jens Axboedef596e2019-01-09 08:59:42 -07002587
Jens Axboe500f9fb2019-08-19 12:15:59 -06002588 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002589 return ret;
2590}
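/*
 * How this is typically driven from userspace: with IORING_SETUP_IOPOLL,
 * completions are only reaped when the application enters the kernel with
 * IORING_ENTER_GETEVENTS, which is what lands in io_iopoll_check(). A
 * minimal liburing sketch, assuming an O_DIRECT-capable file, a suitably
 * aligned buffer and liburing's prep helpers (error handling omitted):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, 4096, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *
 * io_uring_wait_cqe() is what issues the IORING_ENTER_GETEVENTS call and
 * ends up spinning in the loop above until the polled read completes.
 */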
2591
Jens Axboe491381ce2019-10-17 09:20:46 -06002592static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002593{
Jens Axboe491381ce2019-10-17 09:20:46 -06002594 /*
2595 * Tell lockdep we inherited freeze protection from the submission
2596 * thread.
2597 */
2598 if (req->flags & REQ_F_ISREG) {
2599 struct inode *inode = file_inode(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002600
Jens Axboe491381ce2019-10-17 09:20:46 -06002601 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002602 }
Jens Axboe491381ce2019-10-17 09:20:46 -06002603 file_end_write(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002604}
2605
Jens Axboea1d7c392020-06-22 11:09:46 -06002606static void io_complete_rw_common(struct kiocb *kiocb, long res,
2607 struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002608{
Jens Axboe9adbd452019-12-20 08:45:55 -07002609 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002610 int cflags = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002611
Jens Axboe491381ce2019-10-17 09:20:46 -06002612 if (kiocb->ki_flags & IOCB_WRITE)
2613 kiocb_end_write(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002614
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002615 if (res != req->result)
2616 req_set_fail_links(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002617 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002618 cflags = io_put_rw_kbuf(req);
Jens Axboea1d7c392020-06-22 11:09:46 -06002619 __io_req_complete(req, res, cflags, cs);
Jens Axboeba816ad2019-09-28 11:36:45 -06002620}
2621
Jens Axboeb63534c2020-06-04 11:28:00 -06002622#ifdef CONFIG_BLOCK
2623static bool io_resubmit_prep(struct io_kiocb *req, int error)
2624{
2625 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
2626 ssize_t ret = -ECANCELED;
2627 struct iov_iter iter;
2628 int rw;
2629
2630 if (error) {
2631 ret = error;
2632 goto end_req;
2633 }
2634
2635 switch (req->opcode) {
2636 case IORING_OP_READV:
2637 case IORING_OP_READ_FIXED:
2638 case IORING_OP_READ:
2639 rw = READ;
2640 break;
2641 case IORING_OP_WRITEV:
2642 case IORING_OP_WRITE_FIXED:
2643 case IORING_OP_WRITE:
2644 rw = WRITE;
2645 break;
2646 default:
2647 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2648 req->opcode);
2649 goto end_req;
2650 }
2651
Jens Axboee8c2bc12020-08-15 18:44:09 -07002652 if (!req->async_data) {
Jens Axboe8f3d7492020-09-14 09:28:14 -06002653 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2654 if (ret < 0)
2655 goto end_req;
2656 ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
2657 if (!ret)
2658 return true;
2659 kfree(iovec);
2660 } else {
Jens Axboeb63534c2020-06-04 11:28:00 -06002661 return true;
Jens Axboe8f3d7492020-09-14 09:28:14 -06002662 }
Jens Axboeb63534c2020-06-04 11:28:00 -06002663end_req:
Jens Axboeb63534c2020-06-04 11:28:00 -06002664 req_set_fail_links(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002665 return false;
2666}
Jens Axboeb63534c2020-06-04 11:28:00 -06002667#endif
2668
2669static bool io_rw_reissue(struct io_kiocb *req, long res)
2670{
2671#ifdef CONFIG_BLOCK
Jens Axboe355afae2020-09-02 09:30:31 -06002672 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboeb63534c2020-06-04 11:28:00 -06002673 int ret;
2674
Jens Axboe355afae2020-09-02 09:30:31 -06002675 if (!S_ISBLK(mode) && !S_ISREG(mode))
2676 return false;
Jens Axboeb63534c2020-06-04 11:28:00 -06002677 if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
2678 return false;
2679
Jens Axboe28cea78a2020-09-14 10:51:17 -06002680 ret = io_sq_thread_acquire_mm_files(req->ctx, req);
Jens Axboe6d816e02020-08-11 08:04:14 -06002681
Jens Axboefdee9462020-08-27 16:46:24 -06002682 if (io_resubmit_prep(req, ret)) {
2683 refcount_inc(&req->refs);
2684 io_queue_async_work(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002685 return true;
Jens Axboefdee9462020-08-27 16:46:24 -06002686 }
2687
Jens Axboeb63534c2020-06-04 11:28:00 -06002688#endif
2689 return false;
2690}
2691
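/*
 * Common completion path for read/write: give io_rw_reissue() a shot at
 * resubmitting transient -EAGAIN failures from task context first, and
 * only post the result to the CQ ring when reissue isn't possible.
 */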
Jens Axboea1d7c392020-06-22 11:09:46 -06002692static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2693 struct io_comp_state *cs)
2694{
2695 if (!io_rw_reissue(req, res))
2696 io_complete_rw_common(&req->rw.kiocb, res, cs);
Jens Axboeba816ad2019-09-28 11:36:45 -06002697}
2698
2699static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2700{
Jens Axboe9adbd452019-12-20 08:45:55 -07002701 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002702
Jens Axboea1d7c392020-06-22 11:09:46 -06002703 __io_complete_rw(req, res, res2, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002704}
2705
Jens Axboedef596e2019-01-09 08:59:42 -07002706static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2707{
Jens Axboe9adbd452019-12-20 08:45:55 -07002708 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002709
Jens Axboe491381ce2019-10-17 09:20:46 -06002710 if (kiocb->ki_flags & IOCB_WRITE)
2711 kiocb_end_write(req);
Jens Axboedef596e2019-01-09 08:59:42 -07002712
Xiaoguang Wang2d7d6792020-06-16 02:06:37 +08002713 if (res != -EAGAIN && res != req->result)
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002714 req_set_fail_links(req);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002715
2716 WRITE_ONCE(req->result, res);
2717 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002718 smp_wmb();
2719 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002720}
2721
2722/*
2723 * After the iocb has been issued, it's safe to be found on the poll list.
2724 * Adding the kiocb to the list AFTER submission ensures that we don't
2725 * find it from an io_iopoll_getevents() thread before the issuer is done
2726 * accessing the kiocb cookie.
2727 */
2728static void io_iopoll_req_issued(struct io_kiocb *req)
2729{
2730 struct io_ring_ctx *ctx = req->ctx;
2731
2732 /*
2733 * Track whether we have multiple files in our lists. This will impact
2734 * how we do polling eventually, not spinning if we're on potentially
2735 * different devices.
2736 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002737 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002738 ctx->poll_multi_file = false;
2739 } else if (!ctx->poll_multi_file) {
2740 struct io_kiocb *list_req;
2741
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002742 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002743 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002744 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002745 ctx->poll_multi_file = true;
2746 }
2747
2748 /*
2749 * For fast devices, IO may have already completed. If it has, add
2750 * it to the front so we find it first.
2751 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002752 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002753 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002754 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002755 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002756
2757 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
Jens Axboe534ca6d2020-09-02 13:52:19 -06002758 wq_has_sleeper(&ctx->sq_data->wait))
2759 wake_up(&ctx->sq_data->wait);
Jens Axboedef596e2019-01-09 08:59:42 -07002760}
2761
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002762static void __io_state_file_put(struct io_submit_state *state)
Jens Axboe9a56a232019-01-09 09:06:50 -07002763{
Pavel Begunkov06ef3602020-07-16 23:28:33 +03002764 if (state->has_refs)
2765 fput_many(state->file, state->has_refs);
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002766 state->file = NULL;
2767}
2768
2769static inline void io_state_file_put(struct io_submit_state *state)
2770{
2771 if (state->file)
2772 __io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002773}
2774
2775/*
2776 * Get as many references to a file as we have IOs left in this submission,
2777 * assuming most submissions are for one file, or at least that each file
2778 * has more than one submission.
2779 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002780static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002781{
2782 if (!state)
2783 return fget(fd);
2784
2785 if (state->file) {
2786 if (state->fd == fd) {
Pavel Begunkov06ef3602020-07-16 23:28:33 +03002787 state->has_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002788 return state->file;
2789 }
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002790 __io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002791 }
2792 state->file = fget_many(fd, state->ios_left);
2793 if (!state->file)
2794 return NULL;
2795
2796 state->fd = fd;
Pavel Begunkov71b547c2020-10-10 18:34:09 +01002797 state->has_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002798 return state->file;
2799}
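/*
 * Worked example, assuming a submission batch where ios_left == 8 and all
 * SQEs target the same fd: the first lookup misses, fget_many(fd, 8) takes
 * eight references in one call, one is handed to that request and has_refs
 * becomes 7. Each following hit on the same fd hands out a cached
 * reference and decrements has_refs; whatever remains when the state is
 * torn down is dropped in a single fput_many() via __io_state_file_put().
 */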
2800
Jens Axboe4503b762020-06-01 10:00:27 -06002801static bool io_bdev_nowait(struct block_device *bdev)
2802{
2803#ifdef CONFIG_BLOCK
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002804 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002805#else
2806 return true;
2807#endif
2808}
2809
Jens Axboe2b188cc2019-01-07 10:46:33 -07002810/*
2811 * If we tracked the file through the SCM inflight mechanism, we could support
2812 * any file. For now, just ensure that anything potentially problematic is done
2813 * inline.
2814 */
Jens Axboeaf197f52020-04-28 13:15:06 -06002815static bool io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002816{
2817 umode_t mode = file_inode(file)->i_mode;
2818
Jens Axboe4503b762020-06-01 10:00:27 -06002819 if (S_ISBLK(mode)) {
2820 if (io_bdev_nowait(file->f_inode->i_bdev))
2821 return true;
2822 return false;
2823 }
2824 if (S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002825 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002826 if (S_ISREG(mode)) {
2827 if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2828 file->f_op != &io_uring_fops)
2829 return true;
2830 return false;
2831 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002832
Jens Axboec5b85622020-06-09 19:23:05 -06002833 /* any ->read/write should understand O_NONBLOCK */
2834 if (file->f_flags & O_NONBLOCK)
2835 return true;
2836
Jens Axboeaf197f52020-04-28 13:15:06 -06002837 if (!(file->f_mode & FMODE_NOWAIT))
2838 return false;
2839
2840 if (rw == READ)
2841 return file->f_op->read_iter != NULL;
2842
2843 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002844}
2845
Pavel Begunkova88fc402020-09-30 22:57:53 +03002846static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002847{
Jens Axboedef596e2019-01-09 08:59:42 -07002848 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002849 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboe09bb8392019-03-13 12:39:28 -06002850 unsigned ioprio;
2851 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002852
Jens Axboe491381ce2019-10-17 09:20:46 -06002853 if (S_ISREG(file_inode(req->file)->i_mode))
2854 req->flags |= REQ_F_ISREG;
2855
Jens Axboe2b188cc2019-01-07 10:46:33 -07002856 kiocb->ki_pos = READ_ONCE(sqe->off);
Jens Axboeba042912019-12-25 16:33:42 -07002857 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2858 req->flags |= REQ_F_CUR_POS;
2859 kiocb->ki_pos = req->file->f_pos;
2860 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002861 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002862 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2863 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2864 if (unlikely(ret))
2865 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002866
2867 ioprio = READ_ONCE(sqe->ioprio);
2868 if (ioprio) {
2869 ret = ioprio_check_cap(ioprio);
2870 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002871 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002872
2873 kiocb->ki_ioprio = ioprio;
2874 } else
2875 kiocb->ki_ioprio = get_current_ioprio();
2876
Stefan Bühler8449eed2019-04-27 20:34:19 +02002877 /* don't allow async punt if RWF_NOWAIT was requested */
Jens Axboec5b85622020-06-09 19:23:05 -06002878 if (kiocb->ki_flags & IOCB_NOWAIT)
Stefan Bühler8449eed2019-04-27 20:34:19 +02002879 req->flags |= REQ_F_NOWAIT;
2880
Jens Axboedef596e2019-01-09 08:59:42 -07002881 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002882 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2883 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002884 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002885
Jens Axboedef596e2019-01-09 08:59:42 -07002886 kiocb->ki_flags |= IOCB_HIPRI;
2887 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002888 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002889 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002890 if (kiocb->ki_flags & IOCB_HIPRI)
2891 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002892 kiocb->ki_complete = io_complete_rw;
2893 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002894
Jens Axboe3529d8c2019-12-19 18:24:38 -07002895 req->rw.addr = READ_ONCE(sqe->addr);
2896 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002897 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002898 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002899}
2900
2901static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2902{
2903 switch (ret) {
2904 case -EIOCBQUEUED:
2905 break;
2906 case -ERESTARTSYS:
2907 case -ERESTARTNOINTR:
2908 case -ERESTARTNOHAND:
2909 case -ERESTART_RESTARTBLOCK:
2910 /*
2911 * We can't just restart the syscall, since previously
2912 * submitted sqes may already be in progress. Just fail this
2913 * IO with EINTR.
2914 */
2915 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002916 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002917 default:
2918 kiocb->ki_complete(kiocb, ret, 0);
2919 }
2920}
2921
Jens Axboea1d7c392020-06-22 11:09:46 -06002922static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2923 struct io_comp_state *cs)
Jens Axboeba816ad2019-09-28 11:36:45 -06002924{
Jens Axboeba042912019-12-25 16:33:42 -07002925 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002926 struct io_async_rw *io = req->async_data;
Jens Axboeba042912019-12-25 16:33:42 -07002927
Jens Axboe227c0c92020-08-13 11:51:40 -06002928 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002929 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002930 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002931 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002932 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002933 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002934 }
2935
Jens Axboeba042912019-12-25 16:33:42 -07002936 if (req->flags & REQ_F_CUR_POS)
2937 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002938 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Jens Axboea1d7c392020-06-22 11:09:46 -06002939 __io_complete_rw(req, ret, 0, cs);
Jens Axboeba816ad2019-09-28 11:36:45 -06002940 else
2941 io_rw_done(kiocb, ret);
2942}
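/*
 * Worked example of the bytes_done accounting above, assuming a 16KB
 * buffered read that makes partial progress: the first attempt copies
 * 4096 bytes before stalling, io_read() adds those to rw->bytes_done and
 * arranges a retry, and the retry returns the remaining 12288 bytes.
 * kiocb_done() then reports 4096 + 12288 = 16384 in the CQE rather than
 * only the last attempt's return value.
 */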
2943
Jens Axboe9adbd452019-12-20 08:45:55 -07002944static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
Pavel Begunkov7d009162019-11-25 23:14:40 +03002945 struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07002946{
Jens Axboe9adbd452019-12-20 08:45:55 -07002947 struct io_ring_ctx *ctx = req->ctx;
2948 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002949 struct io_mapped_ubuf *imu;
Pavel Begunkov4be1c612020-09-06 00:45:48 +03002950 u16 index, buf_index = req->buf_index;
Jens Axboeedafcce2019-01-09 09:16:05 -07002951 size_t offset;
2952 u64 buf_addr;
2953
Jens Axboeedafcce2019-01-09 09:16:05 -07002954 if (unlikely(buf_index >= ctx->nr_user_bufs))
2955 return -EFAULT;
Jens Axboeedafcce2019-01-09 09:16:05 -07002956 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2957 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07002958 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002959
2960 /* overflow */
2961 if (buf_addr + len < buf_addr)
2962 return -EFAULT;
2963 /* not inside the mapped region */
2964 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2965 return -EFAULT;
2966
2967 /*
2968 * May not be the start of the buffer; set the size appropriately
2969 * and advance us to the beginning.
2970 */
2971 offset = buf_addr - imu->ubuf;
2972 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002973
2974 if (offset) {
2975 /*
2976 * Don't use iov_iter_advance() here, as it's really slow for
2977 * using the latter parts of a big fixed buffer - it iterates
2978 * over each segment manually. We can cheat a bit here, because
2979 * we know that:
2980 *
2981 * 1) it's a BVEC iter, we set it up
2982 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2983 * first and last bvec
2984 *
2985 * So just find our index, and adjust the iterator afterwards.
2986 * If the offset is within the first bvec (or the whole first
2987 * bvec, just use iov_iter_advance(). This makes it easier
2988 * since we can just skip the first segment, which may not
2989 * be PAGE_SIZE aligned.
2990 */
2991 const struct bio_vec *bvec = imu->bvec;
2992
2993 if (offset <= bvec->bv_len) {
2994 iov_iter_advance(iter, offset);
2995 } else {
2996 unsigned long seg_skip;
2997
2998 /* skip first vec */
2999 offset -= bvec->bv_len;
3000 seg_skip = 1 + (offset >> PAGE_SHIFT);
3001
3002 iter->bvec = bvec + seg_skip;
3003 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02003004 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003005 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003006 }
3007 }
3008
Jens Axboe5e559562019-11-13 16:12:46 -07003009 return len;
Jens Axboeedafcce2019-01-09 09:16:05 -07003010}
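/*
 * Worked example of the bvec-skip math above, assuming PAGE_SIZE == 4096
 * and a page-aligned registered buffer (fixed buffers are registered up
 * front via IORING_REGISTER_BUFFERS and picked by sqe->buf_index): a
 * request with req->rw.addr == imu->ubuf + 9000 gives offset = 9000.
 * That exceeds the first bvec's 4096 bytes, so offset becomes
 * 9000 - 4096 = 4904 and seg_skip = 1 + (4904 >> PAGE_SHIFT) = 2, i.e.
 * the iterator starts at the third bvec. iter->count shrinks by
 * 4096 + 4904 = 9000 (the skipped bytes) and
 * iov_offset = 4904 & ~PAGE_MASK = 808, matching 9000 - 2 * 4096.
 */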
3011
Jens Axboebcda7ba2020-02-23 16:42:51 -07003012static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3013{
3014 if (needs_lock)
3015 mutex_unlock(&ctx->uring_lock);
3016}
3017
3018static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3019{
3020 /*
3021 * "Normal" inline submissions always hold the uring_lock, since we
3022 * grab it from the system call. Same is true for the SQPOLL offload.
3023 * The only exception is when we've detached the request and issue it
3024 * from an async worker thread, grab the lock for that case.
3025 */
3026 if (needs_lock)
3027 mutex_lock(&ctx->uring_lock);
3028}
3029
3030static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3031 int bgid, struct io_buffer *kbuf,
3032 bool needs_lock)
3033{
3034 struct io_buffer *head;
3035
3036 if (req->flags & REQ_F_BUFFER_SELECTED)
3037 return kbuf;
3038
3039 io_ring_submit_lock(req->ctx, needs_lock);
3040
3041 lockdep_assert_held(&req->ctx->uring_lock);
3042
3043 head = idr_find(&req->ctx->io_buffer_idr, bgid);
3044 if (head) {
3045 if (!list_empty(&head->list)) {
3046 kbuf = list_last_entry(&head->list, struct io_buffer,
3047 list);
3048 list_del(&kbuf->list);
3049 } else {
3050 kbuf = head;
3051 idr_remove(&req->ctx->io_buffer_idr, bgid);
3052 }
3053 if (*len > kbuf->len)
3054 *len = kbuf->len;
3055 } else {
3056 kbuf = ERR_PTR(-ENOBUFS);
3057 }
3058
3059 io_ring_submit_unlock(req->ctx, needs_lock);
3060
3061 return kbuf;
3062}
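/*
 * The buffer groups consulted here are filled by the application with
 * IORING_OP_PROVIDE_BUFFERS. A rough liburing sketch (names as provided
 * by liburing; error handling omitted) that provides sixteen 4KB buffers
 * in group 7 and then lets the kernel pick one for a read:
 *
 *	io_uring_prep_provide_buffers(sqe, base, 4096, 16, 7, 0);
 *	...
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 7;
 *
 * On completion, IORING_CQE_F_BUFFER is set in cqe->flags and the chosen
 * buffer id is cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */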
3063
Jens Axboe4d954c22020-02-27 07:31:19 -07003064static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3065 bool needs_lock)
3066{
3067 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003068 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07003069
3070 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003071 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07003072 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3073 if (IS_ERR(kbuf))
3074 return kbuf;
3075 req->rw.addr = (u64) (unsigned long) kbuf;
3076 req->flags |= REQ_F_BUFFER_SELECTED;
3077 return u64_to_user_ptr(kbuf->addr);
3078}
3079
3080#ifdef CONFIG_COMPAT
3081static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3082 bool needs_lock)
3083{
3084 struct compat_iovec __user *uiov;
3085 compat_ssize_t clen;
3086 void __user *buf;
3087 ssize_t len;
3088
3089 uiov = u64_to_user_ptr(req->rw.addr);
3090 if (!access_ok(uiov, sizeof(*uiov)))
3091 return -EFAULT;
3092 if (__get_user(clen, &uiov->iov_len))
3093 return -EFAULT;
3094 if (clen < 0)
3095 return -EINVAL;
3096
3097 len = clen;
3098 buf = io_rw_buffer_select(req, &len, needs_lock);
3099 if (IS_ERR(buf))
3100 return PTR_ERR(buf);
3101 iov[0].iov_base = buf;
3102 iov[0].iov_len = (compat_size_t) len;
3103 return 0;
3104}
3105#endif
3106
3107static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3108 bool needs_lock)
3109{
3110 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3111 void __user *buf;
3112 ssize_t len;
3113
3114 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3115 return -EFAULT;
3116
3117 len = iov[0].iov_len;
3118 if (len < 0)
3119 return -EINVAL;
3120 buf = io_rw_buffer_select(req, &len, needs_lock);
3121 if (IS_ERR(buf))
3122 return PTR_ERR(buf);
3123 iov[0].iov_base = buf;
3124 iov[0].iov_len = len;
3125 return 0;
3126}
3127
3128static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3129 bool needs_lock)
3130{
Jens Axboedddb3e22020-06-04 11:27:01 -06003131 if (req->flags & REQ_F_BUFFER_SELECTED) {
3132 struct io_buffer *kbuf;
3133
3134 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3135 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3136 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003137 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003138 }
Jens Axboe4d954c22020-02-27 07:31:19 -07003139 if (!req->rw.len)
3140 return 0;
3141 else if (req->rw.len > 1)
3142 return -EINVAL;
3143
3144#ifdef CONFIG_COMPAT
3145 if (req->ctx->compat)
3146 return io_compat_import(req, iov, needs_lock);
3147#endif
3148
3149 return __io_iov_buffer_select(req, iov, needs_lock);
3150}
3151
Jens Axboe8452fd02020-08-18 13:58:33 -07003152static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
3153 struct iovec **iovec, struct iov_iter *iter,
3154 bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003155{
Jens Axboe9adbd452019-12-20 08:45:55 -07003156 void __user *buf = u64_to_user_ptr(req->rw.addr);
3157 size_t sqe_len = req->rw.len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003158 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003159 u8 opcode;
3160
Jens Axboed625c6e2019-12-17 19:53:05 -07003161 opcode = req->opcode;
Pavel Begunkov7d009162019-11-25 23:14:40 +03003162 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003163 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003164 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003165 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003166
Jens Axboebcda7ba2020-02-23 16:42:51 -07003167 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003168 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003169 return -EINVAL;
3170
Jens Axboe3a6820f2019-12-22 15:19:35 -07003171 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003172 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003173 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003174 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003175 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003176 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003177 }
3178
Jens Axboe3a6820f2019-12-22 15:19:35 -07003179 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3180 *iovec = NULL;
Jens Axboe3a901592020-02-25 17:48:55 -07003181 return ret < 0 ? ret : sqe_len;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003182 }
3183
Jens Axboe4d954c22020-02-27 07:31:19 -07003184 if (req->flags & REQ_F_BUFFER_SELECT) {
3185 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003186 if (!ret) {
3187 ret = (*iovec)->iov_len;
3188 iov_iter_init(iter, rw, *iovec, 1, ret);
3189 }
Jens Axboe4d954c22020-02-27 07:31:19 -07003190 *iovec = NULL;
3191 return ret;
3192 }
3193
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003194 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3195 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003196}
3197
Jens Axboe8452fd02020-08-18 13:58:33 -07003198static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
3199 struct iovec **iovec, struct iov_iter *iter,
3200 bool needs_lock)
3201{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003202 struct io_async_rw *iorw = req->async_data;
3203
3204 if (!iorw)
Jens Axboe8452fd02020-08-18 13:58:33 -07003205 return __io_import_iovec(rw, req, iovec, iter, needs_lock);
3206 *iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003207 return iov_iter_count(&iorw->iter);
Jens Axboe8452fd02020-08-18 13:58:33 -07003208}
3209
Jens Axboe0fef9482020-08-26 10:36:20 -06003210static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3211{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003212 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003213}
3214
Jens Axboe32960612019-09-23 11:05:34 -06003215/*
3216 * For files that don't have ->read_iter() and ->write_iter(), handle them
3217 * by looping over ->read() or ->write() manually.
3218 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003219static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003220{
Jens Axboe4017eb92020-10-22 14:14:12 -06003221 struct kiocb *kiocb = &req->rw.kiocb;
3222 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003223 ssize_t ret = 0;
3224
3225 /*
3226 * We don't support polled IO through this interface, and we can't
3227 * support non-blocking either. For the latter, this just causes
3228 * the kiocb to be handled from an async context.
3229 */
3230 if (kiocb->ki_flags & IOCB_HIPRI)
3231 return -EOPNOTSUPP;
3232 if (kiocb->ki_flags & IOCB_NOWAIT)
3233 return -EAGAIN;
3234
3235 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003236 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003237 ssize_t nr;
3238
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003239 if (!iov_iter_is_bvec(iter)) {
3240 iovec = iov_iter_iovec(iter);
3241 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003242 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3243 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003244 }
3245
Jens Axboe32960612019-09-23 11:05:34 -06003246 if (rw == READ) {
3247 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003248 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003249 } else {
3250 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003251 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003252 }
3253
3254 if (nr < 0) {
3255 if (!ret)
3256 ret = nr;
3257 break;
3258 }
3259 ret += nr;
3260 if (nr != iovec.iov_len)
3261 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003262 req->rw.len -= nr;
3263 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003264 iov_iter_advance(iter, nr);
3265 }
3266
3267 return ret;
3268}
3269
Jens Axboeff6165b2020-08-13 09:47:43 -06003270static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3271 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003272{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003273 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003274
Jens Axboeff6165b2020-08-13 09:47:43 -06003275 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003276 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003277 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003278 /* can only be fixed buffers, no need to do anything */
3279 if (iter->type == ITER_BVEC)
3280 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003281 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003282 unsigned iov_off = 0;
3283
3284 rw->iter.iov = rw->fast_iov;
3285 if (iter->iov != fast_iov) {
3286 iov_off = iter->iov - fast_iov;
3287 rw->iter.iov += iov_off;
3288 }
3289 if (rw->fast_iov != fast_iov)
3290 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003291 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003292 } else {
3293 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003294 }
3295}
3296
Jens Axboee8c2bc12020-08-15 18:44:09 -07003297static inline int __io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003298{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003299 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3300 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3301 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003302}
3303
Jens Axboee8c2bc12020-08-15 18:44:09 -07003304static int io_alloc_async_data(struct io_kiocb *req)
Jens Axboef67676d2019-12-02 11:03:47 -07003305{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003306 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboed3656342019-12-18 09:50:26 -07003307 return 0;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003308
Jens Axboee8c2bc12020-08-15 18:44:09 -07003309 return __io_alloc_async_data(req);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003310}
3311
Jens Axboeff6165b2020-08-13 09:47:43 -06003312static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3313 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003314 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003315{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003316 if (!force && !io_op_defs[req->opcode].needs_async_data)
Jens Axboe74566df2020-01-13 19:23:24 -07003317 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003318 if (!req->async_data) {
3319 if (__io_alloc_async_data(req))
Jens Axboe5d204bc2020-01-31 12:06:52 -07003320 return -ENOMEM;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003321
Jens Axboeff6165b2020-08-13 09:47:43 -06003322 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003323 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003324 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003325}
3326
Pavel Begunkov73debe62020-09-30 22:57:54 +03003327static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003328{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003329 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003330 struct iovec *iov = iorw->fast_iov;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003331 ssize_t ret;
3332
Pavel Begunkov73debe62020-09-30 22:57:54 +03003333 ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003334 if (unlikely(ret < 0))
3335 return ret;
3336
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003337 iorw->bytes_done = 0;
3338 iorw->free_iovec = iov;
3339 if (iov)
3340 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003341 return 0;
3342}
3343
Pavel Begunkov73debe62020-09-30 22:57:54 +03003344static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003345{
3346 ssize_t ret;
3347
Pavel Begunkova88fc402020-09-30 22:57:53 +03003348 ret = io_prep_rw(req, sqe);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003349 if (ret)
3350 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07003351
Jens Axboe3529d8c2019-12-19 18:24:38 -07003352 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3353 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07003354
Pavel Begunkov5f798be2020-02-08 13:28:02 +03003355 /* either don't need iovec imported or already have it */
Pavel Begunkov2d199892020-09-30 22:57:35 +03003356 if (!req->async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003357 return 0;
Pavel Begunkov73debe62020-09-30 22:57:54 +03003358 return io_rw_prep_async(req, READ);
Jens Axboef67676d2019-12-02 11:03:47 -07003359}
3360
Jens Axboec1dd91d2020-08-03 16:43:59 -06003361/*
3362 * This is our waitqueue callback handler, registered through lock_page_async()
3363 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3364 * This gets called when the page is unlocked, and we generally expect that to
3365 * happen when the page IO is completed and the page is now uptodate. This will
3366 * queue a task_work based retry of the operation, attempting to copy the data
3367 * again. If the latter fails because the page was NOT uptodate, then we will
3368 * do a thread based blocking retry of the operation. That's the unexpected
3369 * slow path.
3370 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003371static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3372 int sync, void *arg)
3373{
3374 struct wait_page_queue *wpq;
3375 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003376 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003377 int ret;
3378
3379 wpq = container_of(wait, struct wait_page_queue, wait);
3380
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003381 if (!wake_page_match(wpq, key))
3382 return 0;
3383
Hao Xuc8d317a2020-09-29 20:00:45 +08003384 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003385 list_del_init(&wait->entry);
3386
Pavel Begunkove7375122020-07-12 20:42:04 +03003387 init_task_work(&req->task_work, io_req_task_submit);
Jens Axboe6d816e02020-08-11 08:04:14 -06003388 percpu_ref_get(&req->ctx->refs);
3389
Jens Axboebcf5a062020-05-22 09:24:42 -06003390 /* submit ref gets dropped, acquire a new one */
3391 refcount_inc(&req->refs);
Jens Axboe87c43112020-09-30 21:00:14 -06003392 ret = io_req_task_work_add(req, true);
Jens Axboebcf5a062020-05-22 09:24:42 -06003393 if (unlikely(ret)) {
Jens Axboec2c4c832020-07-01 15:37:11 -06003394 struct task_struct *tsk;
3395
Jens Axboebcf5a062020-05-22 09:24:42 -06003396 /* queue just for cancelation */
Pavel Begunkove7375122020-07-12 20:42:04 +03003397 init_task_work(&req->task_work, io_req_task_cancel);
Jens Axboebcf5a062020-05-22 09:24:42 -06003398 tsk = io_wq_get_task(req->ctx->io_wq);
Jens Axboe91989c72020-10-16 09:02:26 -06003399 task_work_add(tsk, &req->task_work, TWA_NONE);
Jens Axboec2c4c832020-07-01 15:37:11 -06003400 wake_up_process(tsk);
Jens Axboebcf5a062020-05-22 09:24:42 -06003401 }
Jens Axboebcf5a062020-05-22 09:24:42 -06003402 return 1;
3403}
3404
Jens Axboec1dd91d2020-08-03 16:43:59 -06003405/*
3406 * This controls whether a given IO request should be armed for async page
3407 * based retry. If we return false here, the request is handed to the async
3408 * worker threads for retry. If we're doing buffered reads on a regular file,
3409 * we prepare a private wait_page_queue entry and retry the operation. This
3410 * will either succeed because the page is now uptodate and unlocked, or it
3411 * will register a callback when the page is unlocked at IO completion. Through
3412 * that callback, io_uring uses task_work to setup a retry of the operation.
3413 * That retry will attempt the buffered read again. The retry will generally
3414 * succeed, or in rare cases where it fails, we then fall back to using the
3415 * async worker threads for a blocking retry.
3416 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003417static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003418{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003419 struct io_async_rw *rw = req->async_data;
3420 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003421 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003422
3423 /* never retry for NOWAIT, we just complete with -EAGAIN */
3424 if (req->flags & REQ_F_NOWAIT)
3425 return false;
3426
Jens Axboe227c0c92020-08-13 11:51:40 -06003427 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003428 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003429 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003430
Jens Axboebcf5a062020-05-22 09:24:42 -06003431 /*
3432 * just use poll if we can, and don't attempt if the fs doesn't
3433 * support callback based unlocks
3434 */
3435 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3436 return false;
3437
Jens Axboe3b2a4432020-08-16 10:58:43 -07003438 wait->wait.func = io_async_buf_func;
3439 wait->wait.private = req;
3440 wait->wait.flags = 0;
3441 INIT_LIST_HEAD(&wait->wait.entry);
3442 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003443 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003444 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003445 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003446}
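/*
 * Putting the two pieces above together, one buffered-read retry cycle
 * looks roughly like this: io_read() issues with IOCB_WAITQ set and
 * IOCB_NOWAIT cleared; if a page isn't uptodate the read returns
 * -EIOCBQUEUED with the wait entry armed on that page's waitqueue. When
 * the page is unlocked, io_async_buf_func() queues task_work that
 * re-enters io_read(), which retries using the persistent iterator with
 * any bytes already copied accounted in rw->bytes_done.
 */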
3447
3448static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3449{
3450 if (req->file->f_op->read_iter)
3451 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003452 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003453 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003454 else
3455 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003456}
3457
Jens Axboea1d7c392020-06-22 11:09:46 -06003458static int io_read(struct io_kiocb *req, bool force_nonblock,
3459 struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003460{
3461 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003462 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003463 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003464 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003465 ssize_t io_size, ret, ret2;
Jens Axboe31b51512019-01-18 22:56:34 -07003466 size_t iov_count;
Jens Axboef5cac8b2020-09-14 09:30:38 -06003467 bool no_async;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003468
Jens Axboee8c2bc12020-08-15 18:44:09 -07003469 if (rw)
3470 iter = &rw->iter;
Jens Axboeff6165b2020-08-13 09:47:43 -06003471
3472 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
Jens Axboe06b76d42019-12-19 14:44:26 -07003473 if (ret < 0)
3474 return ret;
Jens Axboeeefdf302020-08-27 16:40:19 -06003475 iov_count = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003476 io_size = ret;
3477 req->result = io_size;
Jens Axboe227c0c92020-08-13 11:51:40 -06003478 ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003479
Jens Axboefd6c2e42019-12-18 12:19:41 -07003480 /* Ensure we clear previously set non-block flag */
3481 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003482 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003483 else
3484 kiocb->ki_flags |= IOCB_NOWAIT;
3485
Jens Axboefd6c2e42019-12-18 12:19:41 -07003486
Pavel Begunkov24c74672020-06-21 13:09:51 +03003487 /* If the file doesn't support async, just async punt */
Jens Axboef5cac8b2020-09-14 09:30:38 -06003488 no_async = force_nonblock && !io_file_supports_async(req->file, READ);
3489 if (no_async)
Jens Axboef67676d2019-12-02 11:03:47 -07003490 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003491
Jens Axboe0fef9482020-08-26 10:36:20 -06003492 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003493 if (unlikely(ret))
3494 goto out_free;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003495
Jens Axboe227c0c92020-08-13 11:51:40 -06003496 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003497
Jens Axboe227c0c92020-08-13 11:51:40 -06003498 if (!ret) {
3499 goto done;
3500 } else if (ret == -EIOCBQUEUED) {
3501 ret = 0;
3502 goto out_free;
3503 } else if (ret == -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003504 /* IOPOLL retry should happen for io-wq threads */
3505 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003506 goto done;
Jens Axboe355afae2020-09-02 09:30:31 -06003507 /* no retry on NONBLOCK marked file */
3508 if (req->file->f_flags & O_NONBLOCK)
3509 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003510 /* some cases will consume bytes even on error returns */
3511 iov_iter_revert(iter, iov_count - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003512 ret = 0;
3513 goto copy_iov;
Jens Axboe227c0c92020-08-13 11:51:40 -06003514 } else if (ret < 0) {
Jens Axboe00d23d52020-08-25 12:59:22 -06003515 /* make sure -ERESTARTSYS -> -EINTR is done */
3516 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003517 }
3518
3519 /* read it all, or we did blocking attempt. no retry. */
Jens Axboef91daf52020-08-15 15:58:42 -07003520 if (!iov_iter_count(iter) || !force_nonblock ||
3521 (req->file->f_flags & O_NONBLOCK))
Jens Axboe227c0c92020-08-13 11:51:40 -06003522 goto done;
3523
3524 io_size -= ret;
3525copy_iov:
3526 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3527 if (ret2) {
3528 ret = ret2;
3529 goto out_free;
3530 }
Jens Axboef5cac8b2020-09-14 09:30:38 -06003531 if (no_async)
3532 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003533 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003534 /* it's copied and will be cleaned with ->io */
3535 iovec = NULL;
3536 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003537 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003538retry:
Jens Axboee8c2bc12020-08-15 18:44:09 -07003539 rw->bytes_done += ret;
Jens Axboe227c0c92020-08-13 11:51:40 -06003540 /* if we can retry, do so with the callbacks armed */
3541 if (!io_rw_should_retry(req)) {
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003542 kiocb->ki_flags &= ~IOCB_WAITQ;
3543 return -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003544 }
Jens Axboe227c0c92020-08-13 11:51:40 -06003545
3546 /*
3547 * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
3548 * get -EIOCBQUEUED, then we'll get a notification when the desired
3549 * page gets unlocked. We can also get a partial read here, and if we
3550 * do, then just retry at the new offset.
3551 */
3552 ret = io_iter_do_read(req, iter);
3553 if (ret == -EIOCBQUEUED) {
3554 ret = 0;
3555 goto out_free;
3556 } else if (ret > 0 && ret < io_size) {
3557 /* we got some bytes, but not all. retry. */
3558 goto retry;
3559 }
3560done:
3561 kiocb_done(kiocb, ret, cs);
3562 ret = 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003563out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003564 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003565 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003566 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003567 return ret;
3568}
3569
Pavel Begunkov73debe62020-09-30 22:57:54 +03003570static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003571{
3572 ssize_t ret;
3573
Pavel Begunkova88fc402020-09-30 22:57:53 +03003574 ret = io_prep_rw(req, sqe);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003575 if (ret)
3576 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07003577
Jens Axboe3529d8c2019-12-19 18:24:38 -07003578 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3579 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07003580
Pavel Begunkov5f798be2020-02-08 13:28:02 +03003581 /* either don't need iovec imported or already have it */
Pavel Begunkov2d199892020-09-30 22:57:35 +03003582 if (!req->async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003583 return 0;
Pavel Begunkov73debe62020-09-30 22:57:54 +03003584 return io_rw_prep_async(req, WRITE);
Jens Axboef67676d2019-12-02 11:03:47 -07003585}
3586
Jens Axboea1d7c392020-06-22 11:09:46 -06003587static int io_write(struct io_kiocb *req, bool force_nonblock,
3588 struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003589{
3590 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003591 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003592 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003593 struct io_async_rw *rw = req->async_data;
Jens Axboe31b51512019-01-18 22:56:34 -07003594 size_t iov_count;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003595 ssize_t ret, ret2, io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003596
Jens Axboee8c2bc12020-08-15 18:44:09 -07003597 if (rw)
3598 iter = &rw->iter;
Jens Axboeff6165b2020-08-13 09:47:43 -06003599
3600 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
Jens Axboe06b76d42019-12-19 14:44:26 -07003601 if (ret < 0)
3602 return ret;
Jens Axboeeefdf302020-08-27 16:40:19 -06003603 iov_count = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003604 io_size = ret;
3605 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003606
Jens Axboefd6c2e42019-12-18 12:19:41 -07003607 /* Ensure we clear previously set non-block flag */
3608 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003609 kiocb->ki_flags &= ~IOCB_NOWAIT;
3610 else
3611 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003612
Pavel Begunkov24c74672020-06-21 13:09:51 +03003613 /* If the file doesn't support async, just async punt */
Jens Axboeaf197f52020-04-28 13:15:06 -06003614 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003615 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003616
Jens Axboe10d59342019-12-09 20:16:22 -07003617 /* file path doesn't support NOWAIT for non-direct_IO */
3618 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3619 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003620 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003621
Jens Axboe0fef9482020-08-26 10:36:20 -06003622 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003623 if (unlikely(ret))
3624 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003625
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003626 /*
3627 * Open-code file_start_write here to grab freeze protection,
3628 * which will be released by another thread in
3629 * io_complete_rw(). Fool lockdep by telling it the lock got
3630 * released so that it doesn't complain about the held lock when
3631 * we return to userspace.
3632 */
3633 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003634 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003635 __sb_writers_release(file_inode(req->file)->i_sb,
3636 SB_FREEZE_WRITE);
3637 }
3638 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003639
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003640 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003641 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003642 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003643 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003644 else
3645 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003646
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003647 /*
3648 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3649 * retry them without IOCB_NOWAIT.
3650 */
3651 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3652 ret2 = -EAGAIN;
Jens Axboe355afae2020-09-02 09:30:31 -06003653 /* no retry on NONBLOCK marked file */
3654 if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
3655 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003656 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003657 /* IOPOLL retry should happen for io-wq threads */
3658 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3659 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003660done:
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003661 kiocb_done(kiocb, ret2, cs);
3662 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003663copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003664 /* some cases will consume bytes even on error returns */
3665 iov_iter_revert(iter, iov_count - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003666 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Jens Axboeff6165b2020-08-13 09:47:43 -06003667 if (!ret)
3668 return -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003669 }
Jens Axboe31b51512019-01-18 22:56:34 -07003670out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003671 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003672 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003673 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003674 return ret;
3675}
3676
Jens Axboe80a261f2020-09-28 14:23:58 -06003677static int io_renameat_prep(struct io_kiocb *req,
3678 const struct io_uring_sqe *sqe)
3679{
3680 struct io_rename *ren = &req->rename;
3681 const char __user *oldf, *newf;
3682
3683 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3684 return -EBADF;
3685
3686 ren->old_dfd = READ_ONCE(sqe->fd);
3687 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3688 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3689 ren->new_dfd = READ_ONCE(sqe->len);
3690 ren->flags = READ_ONCE(sqe->rename_flags);
3691
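	/* getname() makes kernel copies of both paths; dropped via putname() on error or at cleanup */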
3692 ren->oldpath = getname(oldf);
3693 if (IS_ERR(ren->oldpath))
3694 return PTR_ERR(ren->oldpath);
3695
3696 ren->newpath = getname(newf);
3697 if (IS_ERR(ren->newpath)) {
3698 putname(ren->oldpath);
3699 return PTR_ERR(ren->newpath);
3700 }
3701
3702 req->flags |= REQ_F_NEED_CLEANUP;
3703 return 0;
3704}
3705
3706static int io_renameat(struct io_kiocb *req, bool force_nonblock)
3707{
3708 struct io_rename *ren = &req->rename;
3709 int ret;
3710
3711 if (force_nonblock)
3712 return -EAGAIN;
3713
3714 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3715 ren->newpath, ren->flags);
3716
3717 req->flags &= ~REQ_F_NEED_CLEANUP;
3718 if (ret < 0)
3719 req_set_fail_links(req);
3720 io_req_complete(req, ret);
3721 return 0;
3722}
3723
Jens Axboe14a11432020-09-28 14:27:37 -06003724static int io_unlinkat_prep(struct io_kiocb *req,
3725 const struct io_uring_sqe *sqe)
3726{
3727 struct io_unlink *un = &req->unlink;
3728 const char __user *fname;
3729
3730 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3731 return -EBADF;
3732
3733 un->dfd = READ_ONCE(sqe->fd);
3734
3735 un->flags = READ_ONCE(sqe->unlink_flags);
3736 if (un->flags & ~AT_REMOVEDIR)
3737 return -EINVAL;
3738
3739 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3740 un->filename = getname(fname);
3741 if (IS_ERR(un->filename))
3742 return PTR_ERR(un->filename);
3743
3744 req->flags |= REQ_F_NEED_CLEANUP;
3745 return 0;
3746}
3747
3748static int io_unlinkat(struct io_kiocb *req, bool force_nonblock)
3749{
3750 struct io_unlink *un = &req->unlink;
3751 int ret;
3752
3753 if (force_nonblock)
3754 return -EAGAIN;
3755
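	/* AT_REMOVEDIR asks for directory removal; otherwise do a plain unlink */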
3756 if (un->flags & AT_REMOVEDIR)
3757 ret = do_rmdir(un->dfd, un->filename);
3758 else
3759 ret = do_unlinkat(un->dfd, un->filename);
3760
3761 req->flags &= ~REQ_F_NEED_CLEANUP;
3762 if (ret < 0)
3763 req_set_fail_links(req);
3764 io_req_complete(req, ret);
3765 return 0;
3766}
3767
Jens Axboe36f4fa62020-09-05 11:14:22 -06003768static int io_shutdown_prep(struct io_kiocb *req,
3769 const struct io_uring_sqe *sqe)
3770{
3771#if defined(CONFIG_NET)
3772 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3773 return -EINVAL;
3774 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3775 sqe->buf_index)
3776 return -EINVAL;
3777
3778 req->shutdown.how = READ_ONCE(sqe->len);
3779 return 0;
3780#else
3781 return -EOPNOTSUPP;
3782#endif
3783}
3784
3785static int io_shutdown(struct io_kiocb *req, bool force_nonblock)
3786{
3787#if defined(CONFIG_NET)
3788 struct socket *sock;
3789 int ret;
3790
3791 if (force_nonblock)
3792 return -EAGAIN;
3793
3794 sock = sock_from_file(req->file, &ret);
3795 if (unlikely(!sock))
3796 return ret;
3797
3798 ret = __sys_shutdown_sock(sock, req->shutdown.how);
3799 io_req_complete(req, ret);
3800 return 0;
3801#else
3802 return -EOPNOTSUPP;
3803#endif
3804}
3805
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003806static int __io_splice_prep(struct io_kiocb *req,
3807 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003808{
3809 struct io_splice* sp = &req->splice;
3810 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003811
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003812 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3813 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003814
3815 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003816 sp->len = READ_ONCE(sqe->len);
3817 sp->flags = READ_ONCE(sqe->splice_flags);
3818
3819 if (unlikely(sp->flags & ~valid_flags))
3820 return -EINVAL;
3821
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003822 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3823 (sp->flags & SPLICE_F_FD_IN_FIXED));
3824 if (!sp->file_in)
3825 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003826 req->flags |= REQ_F_NEED_CLEANUP;
3827
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003828 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3829 /*
3830	 * Splice operation will be punted async, and we need to
3831	 * modify io_wq_work.flags, so initialize io_wq_work first.
3832 */
3833 io_req_init_async(req);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003834 req->work.flags |= IO_WQ_WORK_UNBOUND;
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003835 }
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003836
3837 return 0;
3838}
3839
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003840static int io_tee_prep(struct io_kiocb *req,
3841 const struct io_uring_sqe *sqe)
3842{
3843 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3844 return -EINVAL;
3845 return __io_splice_prep(req, sqe);
3846}
3847
3848static int io_tee(struct io_kiocb *req, bool force_nonblock)
3849{
3850 struct io_splice *sp = &req->splice;
3851 struct file *in = sp->file_in;
3852 struct file *out = sp->file_out;
3853 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3854 long ret = 0;
3855
3856 if (force_nonblock)
3857 return -EAGAIN;
3858 if (sp->len)
3859 ret = do_tee(in, out, sp->len, flags);
3860
3861 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3862 req->flags &= ~REQ_F_NEED_CLEANUP;
3863
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003864 if (ret != sp->len)
3865 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003866 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003867 return 0;
3868}
3869
3870static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3871{
3872	struct io_splice *sp = &req->splice;
3873
3874 sp->off_in = READ_ONCE(sqe->splice_off_in);
3875 sp->off_out = READ_ONCE(sqe->off);
3876 return __io_splice_prep(req, sqe);
3877}
3878
Pavel Begunkov014db002020-03-03 21:33:12 +03003879static int io_splice(struct io_kiocb *req, bool force_nonblock)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003880{
3881 struct io_splice *sp = &req->splice;
3882 struct file *in = sp->file_in;
3883 struct file *out = sp->file_out;
3884 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3885 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003886 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003887
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003888 if (force_nonblock)
3889 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003890
3891 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3892 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003893
Jens Axboe948a7742020-05-17 14:21:38 -06003894 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003895 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003896
3897 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3898 req->flags &= ~REQ_F_NEED_CLEANUP;
3899
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003900 if (ret != sp->len)
3901 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003902 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003903 return 0;
3904}
3905
Jens Axboe2b188cc2019-01-07 10:46:33 -07003906/*
3907 * IORING_OP_NOP just posts a completion event, nothing else.
3908 */
Jens Axboe229a7b62020-06-22 10:13:11 -06003909static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003910{
3911 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003912
Jens Axboedef596e2019-01-09 08:59:42 -07003913 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3914 return -EINVAL;
3915
Jens Axboe229a7b62020-06-22 10:13:11 -06003916 __io_req_complete(req, 0, 0, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003917 return 0;
3918}
3919
Jens Axboe3529d8c2019-12-19 18:24:38 -07003920static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003921{
Jens Axboe6b063142019-01-10 22:13:58 -07003922 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003923
Jens Axboe09bb8392019-03-13 12:39:28 -06003924 if (!req->file)
3925 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003926
Jens Axboe6b063142019-01-10 22:13:58 -07003927 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003928 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003929 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003930 return -EINVAL;
3931
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003932 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3933 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3934 return -EINVAL;
3935
3936 req->sync.off = READ_ONCE(sqe->off);
3937 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003938 return 0;
3939}
3940
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003941static int io_fsync(struct io_kiocb *req, bool force_nonblock)
Jens Axboe78912932020-01-14 22:09:06 -07003942{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003943 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003944 int ret;
3945
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003946 /* fsync always requires a blocking context */
3947 if (force_nonblock)
3948 return -EAGAIN;
3949
Jens Axboe9adbd452019-12-20 08:45:55 -07003950 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003951 end > 0 ? end : LLONG_MAX,
3952 req->sync.flags & IORING_FSYNC_DATASYNC);
3953 if (ret < 0)
3954 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003955 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003956 return 0;
3957}
3958
Jens Axboed63d1b52019-12-10 10:38:56 -07003959static int io_fallocate_prep(struct io_kiocb *req,
3960 const struct io_uring_sqe *sqe)
3961{
3962 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3963 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003964 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3965 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003966
3967 req->sync.off = READ_ONCE(sqe->off);
3968 req->sync.len = READ_ONCE(sqe->addr);
3969 req->sync.mode = READ_ONCE(sqe->len);
3970 return 0;
3971}
3972
Pavel Begunkov014db002020-03-03 21:33:12 +03003973static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
Jens Axboed63d1b52019-12-10 10:38:56 -07003974{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003975 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003976
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003977	/* fallocate always requires a blocking context */
3978 if (force_nonblock)
3979 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003980 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3981 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003982 if (ret < 0)
3983 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003984 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003985 return 0;
3986}
3987
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003988static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003989{
Jens Axboef8748882020-01-08 17:47:02 -07003990 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003991 int ret;
3992
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003993 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003994 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003995 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003996 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003997
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003998	/* open.how should already be initialised */
3999 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06004000 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004001
Pavel Begunkov25e72d12020-06-03 18:03:23 +03004002 req->open.dfd = READ_ONCE(sqe->fd);
4003 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07004004 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004005 if (IS_ERR(req->open.filename)) {
4006 ret = PTR_ERR(req->open.filename);
4007 req->open.filename = NULL;
4008 return ret;
4009 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06004010 req->open.nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe944d1442020-11-13 16:48:44 -07004011 req->open.ignore_nonblock = false;
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004012 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004013 return 0;
4014}
4015
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004016static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4017{
4018 u64 flags, mode;
4019
Jens Axboe14587a462020-09-05 11:36:08 -06004020 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06004021 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004022 mode = READ_ONCE(sqe->len);
4023 flags = READ_ONCE(sqe->open_flags);
4024 req->open.how = build_open_how(flags, mode);
4025 return __io_openat_prep(req, sqe);
4026}
4027
Jens Axboecebdb982020-01-08 17:59:24 -07004028static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4029{
4030 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07004031 size_t len;
4032 int ret;
4033
Jens Axboe14587a462020-09-05 11:36:08 -06004034 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06004035 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07004036 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4037 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07004038 if (len < OPEN_HOW_SIZE_VER0)
4039 return -EINVAL;
4040
4041 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4042 len);
4043 if (ret)
4044 return ret;
4045
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004046 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07004047}
4048
Pavel Begunkov014db002020-03-03 21:33:12 +03004049static int io_openat2(struct io_kiocb *req, bool force_nonblock)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004050{
4051 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004052 struct file *file;
4053 int ret;
4054
Jens Axboe944d1442020-11-13 16:48:44 -07004055 if (force_nonblock && !req->open.ignore_nonblock)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004056 return -EAGAIN;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004057
Jens Axboecebdb982020-01-08 17:59:24 -07004058 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004059 if (ret)
4060 goto err;
4061
Jens Axboe4022e7a2020-03-19 19:23:18 -06004062 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004063 if (ret < 0)
4064 goto err;
4065
4066 file = do_filp_open(req->open.dfd, req->open.filename, &op);
4067 if (IS_ERR(file)) {
4068 put_unused_fd(ret);
4069 ret = PTR_ERR(file);
Jens Axboe944d1442020-11-13 16:48:44 -07004070 /*
4071	 * A work-around to ensure that /proc/self works the way
4072	 * that it should - if we get -EOPNOTSUPP back, then assume
4073 * that proc_self_get_link() failed us because we're in async
4074 * context. We should be safe to retry this from the task
4075 * itself with force_nonblock == false set, as it should not
4076 * block on lookup. Would be nice to know this upfront and
4077 * avoid the async dance, but doesn't seem feasible.
4078 */
4079 if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
4080 req->open.ignore_nonblock = true;
4081 refcount_inc(&req->refs);
4082 io_req_task_queue(req);
4083 return 0;
4084 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004085 } else {
4086 fsnotify_open(file);
4087 fd_install(ret, file);
4088 }
4089err:
4090 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004091 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004092 if (ret < 0)
4093 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004094 io_req_complete(req, ret);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004095 return 0;
4096}
4097
Pavel Begunkov014db002020-03-03 21:33:12 +03004098static int io_openat(struct io_kiocb *req, bool force_nonblock)
Jens Axboecebdb982020-01-08 17:59:24 -07004099{
Pavel Begunkov014db002020-03-03 21:33:12 +03004100 return io_openat2(req, force_nonblock);
Jens Axboecebdb982020-01-08 17:59:24 -07004101}
4102
Jens Axboe067524e2020-03-02 16:32:28 -07004103static int io_remove_buffers_prep(struct io_kiocb *req,
4104 const struct io_uring_sqe *sqe)
4105{
4106 struct io_provide_buf *p = &req->pbuf;
4107 u64 tmp;
4108
4109 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
4110 return -EINVAL;
4111
4112 tmp = READ_ONCE(sqe->fd);
4113 if (!tmp || tmp > USHRT_MAX)
4114 return -EINVAL;
4115
4116 memset(p, 0, sizeof(*p));
4117 p->nbufs = tmp;
4118 p->bgid = READ_ONCE(sqe->buf_group);
4119 return 0;
4120}
4121
4122static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4123 int bgid, unsigned nbufs)
4124{
4125 unsigned i = 0;
4126
4127 /* shouldn't happen */
4128 if (!nbufs)
4129 return 0;
4130
4131 /* the head kbuf is the list itself */
4132 while (!list_empty(&buf->list)) {
4133 struct io_buffer *nxt;
4134
4135 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4136 list_del(&nxt->list);
4137 kfree(nxt);
4138 if (++i == nbufs)
4139 return i;
4140 }
4141 i++;
4142 kfree(buf);
4143 idr_remove(&ctx->io_buffer_idr, bgid);
4144
4145 return i;
4146}
4147
Jens Axboe229a7b62020-06-22 10:13:11 -06004148static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
4149 struct io_comp_state *cs)
Jens Axboe067524e2020-03-02 16:32:28 -07004150{
4151 struct io_provide_buf *p = &req->pbuf;
4152 struct io_ring_ctx *ctx = req->ctx;
4153 struct io_buffer *head;
4154 int ret = 0;
4155
4156 io_ring_submit_lock(ctx, !force_nonblock);
4157
4158 lockdep_assert_held(&ctx->uring_lock);
4159
4160 ret = -ENOENT;
4161 head = idr_find(&ctx->io_buffer_idr, p->bgid);
4162 if (head)
4163 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
4164
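	/* drop the uring_lock taken above before completing the request */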
4165	io_ring_submit_unlock(ctx, !force_nonblock);
4166 if (ret < 0)
4167 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004168 __io_req_complete(req, ret, 0, cs);
Jens Axboe067524e2020-03-02 16:32:28 -07004169 return 0;
4170}
4171
Jens Axboeddf0322d2020-02-23 16:41:33 -07004172static int io_provide_buffers_prep(struct io_kiocb *req,
4173 const struct io_uring_sqe *sqe)
4174{
4175 struct io_provide_buf *p = &req->pbuf;
4176 u64 tmp;
4177
4178 if (sqe->ioprio || sqe->rw_flags)
4179 return -EINVAL;
4180
4181 tmp = READ_ONCE(sqe->fd);
4182 if (!tmp || tmp > USHRT_MAX)
4183 return -E2BIG;
4184 p->nbufs = tmp;
4185 p->addr = READ_ONCE(sqe->addr);
4186 p->len = READ_ONCE(sqe->len);
4187
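	/* the whole region [addr, addr + len * nbufs) must be addressable userspace memory */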
Bijan Mottahedehefe68c12020-06-04 18:01:52 -07004188 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
Jens Axboeddf0322d2020-02-23 16:41:33 -07004189 return -EFAULT;
4190
4191 p->bgid = READ_ONCE(sqe->buf_group);
4192 tmp = READ_ONCE(sqe->off);
4193 if (tmp > USHRT_MAX)
4194 return -E2BIG;
4195 p->bid = tmp;
4196 return 0;
4197}
4198
4199static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4200{
4201 struct io_buffer *buf;
4202 u64 addr = pbuf->addr;
4203 int i, bid = pbuf->bid;
4204
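	/* carve the user region into nbufs buffers of pbuf->len bytes each, with consecutive buffer IDs */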
4205 for (i = 0; i < pbuf->nbufs; i++) {
4206 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4207 if (!buf)
4208 break;
4209
4210 buf->addr = addr;
4211 buf->len = pbuf->len;
4212 buf->bid = bid;
4213 addr += pbuf->len;
4214 bid++;
4215 if (!*head) {
4216 INIT_LIST_HEAD(&buf->list);
4217 *head = buf;
4218 } else {
4219 list_add_tail(&buf->list, &(*head)->list);
4220 }
4221 }
4222
4223 return i ? i : -ENOMEM;
4224}
4225
Jens Axboe229a7b62020-06-22 10:13:11 -06004226static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
4227 struct io_comp_state *cs)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004228{
4229 struct io_provide_buf *p = &req->pbuf;
4230 struct io_ring_ctx *ctx = req->ctx;
4231 struct io_buffer *head, *list;
4232 int ret = 0;
4233
4234 io_ring_submit_lock(ctx, !force_nonblock);
4235
4236 lockdep_assert_held(&ctx->uring_lock);
4237
4238 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
4239
4240 ret = io_add_buffers(p, &head);
4241 if (ret < 0)
4242 goto out;
4243
4244 if (!list) {
4245 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
4246 GFP_KERNEL);
4247 if (ret < 0) {
Jens Axboe067524e2020-03-02 16:32:28 -07004248 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004249 goto out;
4250 }
4251 }
4252out:
4253 io_ring_submit_unlock(ctx, !force_nonblock);
4254 if (ret < 0)
4255 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004256 __io_req_complete(req, ret, 0, cs);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004257 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004258}
4259
Jens Axboe3e4827b2020-01-08 15:18:09 -07004260static int io_epoll_ctl_prep(struct io_kiocb *req,
4261 const struct io_uring_sqe *sqe)
4262{
4263#if defined(CONFIG_EPOLL)
4264 if (sqe->ioprio || sqe->buf_index)
4265 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004266 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004267 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004268
4269 req->epoll.epfd = READ_ONCE(sqe->fd);
4270 req->epoll.op = READ_ONCE(sqe->len);
4271 req->epoll.fd = READ_ONCE(sqe->off);
4272
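	/* EPOLL_CTL_DEL takes no event; copy the user's event only for ops that carry one */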
4273 if (ep_op_has_event(req->epoll.op)) {
4274 struct epoll_event __user *ev;
4275
4276 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4277 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4278 return -EFAULT;
4279 }
4280
4281 return 0;
4282#else
4283 return -EOPNOTSUPP;
4284#endif
4285}
4286
Jens Axboe229a7b62020-06-22 10:13:11 -06004287static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
4288 struct io_comp_state *cs)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004289{
4290#if defined(CONFIG_EPOLL)
4291 struct io_epoll *ie = &req->epoll;
4292 int ret;
4293
4294 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4295 if (force_nonblock && ret == -EAGAIN)
4296 return -EAGAIN;
4297
4298 if (ret < 0)
4299 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004300 __io_req_complete(req, ret, 0, cs);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004301 return 0;
4302#else
4303 return -EOPNOTSUPP;
4304#endif
4305}
4306
Jens Axboec1ca7572019-12-25 22:18:28 -07004307static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4308{
4309#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4310 if (sqe->ioprio || sqe->buf_index || sqe->off)
4311 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004312 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4313 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004314
4315 req->madvise.addr = READ_ONCE(sqe->addr);
4316 req->madvise.len = READ_ONCE(sqe->len);
4317 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4318 return 0;
4319#else
4320 return -EOPNOTSUPP;
4321#endif
4322}
4323
Pavel Begunkov014db002020-03-03 21:33:12 +03004324static int io_madvise(struct io_kiocb *req, bool force_nonblock)
Jens Axboec1ca7572019-12-25 22:18:28 -07004325{
4326#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4327 struct io_madvise *ma = &req->madvise;
4328 int ret;
4329
4330 if (force_nonblock)
4331 return -EAGAIN;
4332
Minchan Kim0726b012020-10-17 16:14:50 -07004333 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004334 if (ret < 0)
4335 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004336 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004337 return 0;
4338#else
4339 return -EOPNOTSUPP;
4340#endif
4341}
4342
Jens Axboe4840e412019-12-25 22:03:45 -07004343static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4344{
4345 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4346 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004347 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4348 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004349
4350 req->fadvise.offset = READ_ONCE(sqe->off);
4351 req->fadvise.len = READ_ONCE(sqe->len);
4352 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4353 return 0;
4354}
4355
Pavel Begunkov014db002020-03-03 21:33:12 +03004356static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
Jens Axboe4840e412019-12-25 22:03:45 -07004357{
4358 struct io_fadvise *fa = &req->fadvise;
4359 int ret;
4360
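	/* NORMAL/RANDOM/SEQUENTIAL only adjust readahead hints and won't block; other advice may start IO, so punt to async */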
Jens Axboe3e694262020-02-01 09:22:49 -07004361 if (force_nonblock) {
4362 switch (fa->advice) {
4363 case POSIX_FADV_NORMAL:
4364 case POSIX_FADV_RANDOM:
4365 case POSIX_FADV_SEQUENTIAL:
4366 break;
4367 default:
4368 return -EAGAIN;
4369 }
4370 }
Jens Axboe4840e412019-12-25 22:03:45 -07004371
4372 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4373 if (ret < 0)
4374 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004375 io_req_complete(req, ret);
Jens Axboe4840e412019-12-25 22:03:45 -07004376 return 0;
4377}
4378
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004379static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4380{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004381 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004382 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004383 if (sqe->ioprio || sqe->buf_index)
4384 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004385 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004386 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004387
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004388 req->statx.dfd = READ_ONCE(sqe->fd);
4389 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004390 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004391 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4392 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004393
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004394 return 0;
4395}
4396
Pavel Begunkov014db002020-03-03 21:33:12 +03004397static int io_statx(struct io_kiocb *req, bool force_nonblock)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004398{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004399 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004400 int ret;
4401
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004402 if (force_nonblock) {
4403 /* only need file table for an actual valid fd */
4404 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4405 req->flags |= REQ_F_NO_FILE_TABLE;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004406 return -EAGAIN;
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004407 }
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004408
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004409 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4410 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004411
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004412 if (ret < 0)
4413 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004414 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004415 return 0;
4416}
4417
Jens Axboeb5dba592019-12-11 14:02:38 -07004418static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4419{
4420 /*
4421 * If we queue this for async, it must not be cancellable. That would
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08004422	 * leave the 'file' in an indeterminate state, and we need to modify
4423	 * io_wq_work.flags, so initialize io_wq_work first.
Jens Axboeb5dba592019-12-11 14:02:38 -07004424 */
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08004425 io_req_init_async(req);
Jens Axboeb5dba592019-12-11 14:02:38 -07004426 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
4427
Jens Axboe14587a462020-09-05 11:36:08 -06004428 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004429 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004430 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4431 sqe->rw_flags || sqe->buf_index)
4432 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004433 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004434 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004435
4436 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboe0f212202020-09-13 13:09:39 -06004437 if ((req->file && req->file->f_op == &io_uring_fops))
Jens Axboefd2206e2020-06-02 16:40:47 -06004438 return -EBADF;
4439
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004440 req->close.put_file = NULL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004441 return 0;
4442}
4443
Jens Axboe229a7b62020-06-22 10:13:11 -06004444static int io_close(struct io_kiocb *req, bool force_nonblock,
4445 struct io_comp_state *cs)
Jens Axboeb5dba592019-12-11 14:02:38 -07004446{
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004447 struct io_close *close = &req->close;
Jens Axboeb5dba592019-12-11 14:02:38 -07004448 int ret;
4449
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004450	/* might already be done during nonblock submission */
4451 if (!close->put_file) {
4452 ret = __close_fd_get_file(close->fd, &close->put_file);
4453 if (ret < 0)
4454 return (ret == -ENOENT) ? -EBADF : ret;
4455 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004456
4457 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004458 if (close->put_file->f_op->flush && force_nonblock) {
Pavel Begunkov24c74672020-06-21 13:09:51 +03004459 /* was never set, but play safe */
4460 req->flags &= ~REQ_F_NOWAIT;
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004461 /* avoid grabbing files - we don't need the files */
Pavel Begunkov24c74672020-06-21 13:09:51 +03004462 req->flags |= REQ_F_NO_FILE_TABLE;
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004463 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004464 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004465
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004466 /* No ->flush() or already async, safely close from here */
Jens Axboe98447d62020-10-14 10:48:51 -06004467 ret = filp_close(close->put_file, req->work.identity->files);
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004468 if (ret < 0)
4469 req_set_fail_links(req);
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004470 fput(close->put_file);
4471 close->put_file = NULL;
Jens Axboe229a7b62020-06-22 10:13:11 -06004472 __io_req_complete(req, ret, 0, cs);
Jens Axboe1a417f42020-01-31 17:16:48 -07004473 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004474}
4475
Jens Axboe3529d8c2019-12-19 18:24:38 -07004476static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004477{
4478 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004479
4480 if (!req->file)
4481 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004482
4483 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4484 return -EINVAL;
4485 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4486 return -EINVAL;
4487
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004488 req->sync.off = READ_ONCE(sqe->off);
4489 req->sync.len = READ_ONCE(sqe->len);
4490 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004491 return 0;
4492}
4493
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004494static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004495{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004496 int ret;
4497
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004498 /* sync_file_range always requires a blocking context */
4499 if (force_nonblock)
4500 return -EAGAIN;
4501
Jens Axboe9adbd452019-12-20 08:45:55 -07004502 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004503 req->sync.flags);
4504 if (ret < 0)
4505 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004506 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004507 return 0;
4508}
4509
YueHaibing469956e2020-03-04 15:53:52 +08004510#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004511static int io_setup_async_msg(struct io_kiocb *req,
4512 struct io_async_msghdr *kmsg)
4513{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004514 struct io_async_msghdr *async_msg = req->async_data;
4515
4516 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004517 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004518 if (io_alloc_async_data(req)) {
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004519 if (kmsg->iov != kmsg->fast_iov)
4520 kfree(kmsg->iov);
4521 return -ENOMEM;
4522 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004523 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004524 req->flags |= REQ_F_NEED_CLEANUP;
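	/* stash the prepared msghdr (and its iovec) so the punted request can be retried as-is */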
Jens Axboee8c2bc12020-08-15 18:44:09 -07004525 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004526 return -EAGAIN;
4527}
4528
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004529static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4530 struct io_async_msghdr *iomsg)
4531{
4532 iomsg->iov = iomsg->fast_iov;
4533 iomsg->msg.msg_name = &iomsg->addr;
4534 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4535 req->sr_msg.msg_flags, &iomsg->iov);
4536}
4537
Jens Axboe3529d8c2019-12-19 18:24:38 -07004538static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004539{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004540 struct io_async_msghdr *async_msg = req->async_data;
Jens Axboee47293f2019-12-20 08:58:21 -07004541 struct io_sr_msg *sr = &req->sr_msg;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004542 int ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004543
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004544 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4545 return -EINVAL;
4546
Jens Axboee47293f2019-12-20 08:58:21 -07004547 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004548 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004549 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004550
Jens Axboed8768362020-02-27 14:17:49 -07004551#ifdef CONFIG_COMPAT
4552 if (req->ctx->compat)
4553 sr->msg_flags |= MSG_CMSG_COMPAT;
4554#endif
4555
Jens Axboee8c2bc12020-08-15 18:44:09 -07004556 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07004557 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004558 ret = io_sendmsg_copy_hdr(req, async_msg);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004559 if (!ret)
4560 req->flags |= REQ_F_NEED_CLEANUP;
4561 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004562}
4563
Jens Axboe229a7b62020-06-22 10:13:11 -06004564static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4565 struct io_comp_state *cs)
Jens Axboe03b12302019-12-02 18:50:25 -07004566{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004567 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004568 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004569 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004570 int ret;
4571
Jens Axboe03b12302019-12-02 18:50:25 -07004572 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004573 if (unlikely(!sock))
4574 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004575
Jens Axboee8c2bc12020-08-15 18:44:09 -07004576 if (req->async_data) {
4577 kmsg = req->async_data;
4578 kmsg->msg.msg_name = &kmsg->addr;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004579 /* if iov is set, it's allocated already */
4580 if (!kmsg->iov)
4581 kmsg->iov = kmsg->fast_iov;
4582 kmsg->msg.msg_iter.iov = kmsg->iov;
4583 } else {
4584 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004585 if (ret)
4586 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004587 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004588 }
4589
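	/* MSG_DONTWAIT from the app means no async retry on -EAGAIN; otherwise set it ourselves for the nonblock attempt */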
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004590 flags = req->sr_msg.msg_flags;
4591 if (flags & MSG_DONTWAIT)
4592 req->flags |= REQ_F_NOWAIT;
4593 else if (force_nonblock)
4594 flags |= MSG_DONTWAIT;
4595
4596 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4597 if (force_nonblock && ret == -EAGAIN)
4598 return io_setup_async_msg(req, kmsg);
4599 if (ret == -ERESTARTSYS)
4600 ret = -EINTR;
4601
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004602 if (kmsg->iov != kmsg->fast_iov)
Jens Axboe03b12302019-12-02 18:50:25 -07004603 kfree(kmsg->iov);
4604 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboefddafac2020-01-04 20:19:44 -07004605 if (ret < 0)
4606 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004607 __io_req_complete(req, ret, 0, cs);
Jens Axboefddafac2020-01-04 20:19:44 -07004608 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004609}
4610
Jens Axboe229a7b62020-06-22 10:13:11 -06004611static int io_send(struct io_kiocb *req, bool force_nonblock,
4612 struct io_comp_state *cs)
Jens Axboe03b12302019-12-02 18:50:25 -07004613{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004614 struct io_sr_msg *sr = &req->sr_msg;
4615 struct msghdr msg;
4616 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004617 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004618 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004619 int ret;
4620
4621 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004622 if (unlikely(!sock))
4623 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004624
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004625 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4626 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004627 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004628
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004629 msg.msg_name = NULL;
4630 msg.msg_control = NULL;
4631 msg.msg_controllen = 0;
4632 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004633
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004634 flags = req->sr_msg.msg_flags;
4635 if (flags & MSG_DONTWAIT)
4636 req->flags |= REQ_F_NOWAIT;
4637 else if (force_nonblock)
4638 flags |= MSG_DONTWAIT;
Jens Axboe03b12302019-12-02 18:50:25 -07004639
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004640 msg.msg_flags = flags;
4641 ret = sock_sendmsg(sock, &msg);
4642 if (force_nonblock && ret == -EAGAIN)
4643 return -EAGAIN;
4644 if (ret == -ERESTARTSYS)
4645 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004646
Jens Axboe03b12302019-12-02 18:50:25 -07004647 if (ret < 0)
4648 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004649 __io_req_complete(req, ret, 0, cs);
Jens Axboe03b12302019-12-02 18:50:25 -07004650 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004651}
4652
Pavel Begunkov1400e692020-07-12 20:41:05 +03004653static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4654 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004655{
4656 struct io_sr_msg *sr = &req->sr_msg;
4657 struct iovec __user *uiov;
4658 size_t iov_len;
4659 int ret;
4660
Pavel Begunkov1400e692020-07-12 20:41:05 +03004661 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4662 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004663 if (ret)
4664 return ret;
4665
4666 if (req->flags & REQ_F_BUFFER_SELECT) {
4667 if (iov_len > 1)
4668 return -EINVAL;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004669 if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004670 return -EFAULT;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004671 sr->len = iomsg->iov[0].iov_len;
4672 iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004673 sr->len);
Pavel Begunkov1400e692020-07-12 20:41:05 +03004674 iomsg->iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004675 } else {
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004676 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4677 &iomsg->iov, &iomsg->msg.msg_iter,
4678 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004679 if (ret > 0)
4680 ret = 0;
4681 }
4682
4683 return ret;
4684}
4685
4686#ifdef CONFIG_COMPAT
4687static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004688 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004689{
4690 struct compat_msghdr __user *msg_compat;
4691 struct io_sr_msg *sr = &req->sr_msg;
4692 struct compat_iovec __user *uiov;
4693 compat_uptr_t ptr;
4694 compat_size_t len;
4695 int ret;
4696
Pavel Begunkov270a5942020-07-12 20:41:04 +03004697 msg_compat = (struct compat_msghdr __user *) sr->umsg;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004698 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004699 &ptr, &len);
4700 if (ret)
4701 return ret;
4702
4703 uiov = compat_ptr(ptr);
4704 if (req->flags & REQ_F_BUFFER_SELECT) {
4705 compat_ssize_t clen;
4706
4707 if (len > 1)
4708 return -EINVAL;
4709 if (!access_ok(uiov, sizeof(*uiov)))
4710 return -EFAULT;
4711 if (__get_user(clen, &uiov->iov_len))
4712 return -EFAULT;
4713 if (clen < 0)
4714 return -EINVAL;
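		/* the compat iovec itself isn't copied for buffer-select, so take the length from clen */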
Pavel Begunkov1400e692020-07-12 20:41:05 +03004715		sr->len = clen;
4716 iomsg->iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004717 } else {
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004718 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4719 UIO_FASTIOV, &iomsg->iov,
4720 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004721 if (ret < 0)
4722 return ret;
4723 }
4724
4725 return 0;
4726}
Jens Axboe03b12302019-12-02 18:50:25 -07004727#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004728
Pavel Begunkov1400e692020-07-12 20:41:05 +03004729static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4730 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004731{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004732 iomsg->msg.msg_name = &iomsg->addr;
4733 iomsg->iov = iomsg->fast_iov;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004734
4735#ifdef CONFIG_COMPAT
4736 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004737 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004738#endif
4739
Pavel Begunkov1400e692020-07-12 20:41:05 +03004740 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004741}
4742
Jens Axboebcda7ba2020-02-23 16:42:51 -07004743static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004744 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004745{
4746 struct io_sr_msg *sr = &req->sr_msg;
4747 struct io_buffer *kbuf;
4748
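	/* pick a buffer from group sr->bgid; it is recorded on the request so its ID can be reported back in the CQE flags */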
Jens Axboebcda7ba2020-02-23 16:42:51 -07004749 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4750 if (IS_ERR(kbuf))
4751 return kbuf;
4752
4753 sr->kbuf = kbuf;
4754 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004755 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004756}
4757
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004758static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4759{
4760 return io_put_kbuf(req, req->sr_msg.kbuf);
4761}
4762
Jens Axboe3529d8c2019-12-19 18:24:38 -07004763static int io_recvmsg_prep(struct io_kiocb *req,
4764 const struct io_uring_sqe *sqe)
Jens Axboe03b12302019-12-02 18:50:25 -07004765{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004766 struct io_async_msghdr *async_msg = req->async_data;
Jens Axboee47293f2019-12-20 08:58:21 -07004767 struct io_sr_msg *sr = &req->sr_msg;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004768 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004769
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004770 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4771 return -EINVAL;
4772
Jens Axboe3529d8c2019-12-19 18:24:38 -07004773 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004774 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004775 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004776 sr->bgid = READ_ONCE(sqe->buf_group);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004777
Jens Axboed8768362020-02-27 14:17:49 -07004778#ifdef CONFIG_COMPAT
4779 if (req->ctx->compat)
4780 sr->msg_flags |= MSG_CMSG_COMPAT;
4781#endif
4782
Jens Axboee8c2bc12020-08-15 18:44:09 -07004783 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
Jens Axboe06b76d42019-12-19 14:44:26 -07004784 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004785 ret = io_recvmsg_copy_hdr(req, async_msg);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004786 if (!ret)
4787 req->flags |= REQ_F_NEED_CLEANUP;
4788 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004789}
4790
Jens Axboe229a7b62020-06-22 10:13:11 -06004791static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4792 struct io_comp_state *cs)
Jens Axboe03b12302019-12-02 18:50:25 -07004793{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004794 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004795 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004796 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004797 unsigned flags;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004798 int ret, cflags = 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004799
Jens Axboe0fa03c62019-04-19 13:34:07 -06004800 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004801 if (unlikely(!sock))
4802 return ret;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004803
Jens Axboee8c2bc12020-08-15 18:44:09 -07004804 if (req->async_data) {
4805 kmsg = req->async_data;
4806 kmsg->msg.msg_name = &kmsg->addr;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004807 /* if iov is set, it's allocated already */
4808 if (!kmsg->iov)
4809 kmsg->iov = kmsg->fast_iov;
4810 kmsg->msg.msg_iter.iov = kmsg->iov;
4811 } else {
4812 ret = io_recvmsg_copy_hdr(req, &iomsg);
4813 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004814 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004815 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004816 }
4817
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004818 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004819 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004820 if (IS_ERR(kbuf))
4821 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004822 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4823 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
4824 1, req->sr_msg.len);
4825 }
4826
4827 flags = req->sr_msg.msg_flags;
4828 if (flags & MSG_DONTWAIT)
4829 req->flags |= REQ_F_NOWAIT;
4830 else if (force_nonblock)
4831 flags |= MSG_DONTWAIT;
4832
4833 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4834 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004835 if (force_nonblock && ret == -EAGAIN)
4836 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004837 if (ret == -ERESTARTSYS)
4838 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004839
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004840 if (req->flags & REQ_F_BUFFER_SELECTED)
4841 cflags = io_put_recv_kbuf(req);
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004842 if (kmsg->iov != kmsg->fast_iov)
Jens Axboe0b416c32019-12-15 10:57:46 -07004843 kfree(kmsg->iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004844 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004845 if (ret < 0)
4846 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004847 __io_req_complete(req, ret, cflags, cs);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004848 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004849}
4850
Jens Axboe229a7b62020-06-22 10:13:11 -06004851static int io_recv(struct io_kiocb *req, bool force_nonblock,
4852 struct io_comp_state *cs)
Jens Axboefddafac2020-01-04 20:19:44 -07004853{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004854 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004855 struct io_sr_msg *sr = &req->sr_msg;
4856 struct msghdr msg;
4857 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004858 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004859 struct iovec iov;
4860 unsigned flags;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004861 int ret, cflags = 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004862
Jens Axboefddafac2020-01-04 20:19:44 -07004863 sock = sock_from_file(req->file, &ret);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004864 if (unlikely(!sock))
4865 return ret;
Jens Axboefddafac2020-01-04 20:19:44 -07004866
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004867 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004868 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004869 if (IS_ERR(kbuf))
4870 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004871 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004872 }
4873
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004874 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004875 if (unlikely(ret))
4876 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004877
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004878 msg.msg_name = NULL;
4879 msg.msg_control = NULL;
4880 msg.msg_controllen = 0;
4881 msg.msg_namelen = 0;
4882 msg.msg_iocb = NULL;
4883 msg.msg_flags = 0;
4884
4885 flags = req->sr_msg.msg_flags;
4886 if (flags & MSG_DONTWAIT)
4887 req->flags |= REQ_F_NOWAIT;
4888 else if (force_nonblock)
4889 flags |= MSG_DONTWAIT;
4890
4891 ret = sock_recvmsg(sock, &msg, flags);
4892 if (force_nonblock && ret == -EAGAIN)
4893 return -EAGAIN;
4894 if (ret == -ERESTARTSYS)
4895 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004896out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004897 if (req->flags & REQ_F_BUFFER_SELECTED)
4898 cflags = io_put_recv_kbuf(req);
Jens Axboefddafac2020-01-04 20:19:44 -07004899 if (ret < 0)
4900 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06004901 __io_req_complete(req, ret, cflags, cs);
Jens Axboefddafac2020-01-04 20:19:44 -07004902 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004903}
4904
Jens Axboe3529d8c2019-12-19 18:24:38 -07004905static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004906{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004907 struct io_accept *accept = &req->accept;
4908
Jens Axboe14587a462020-09-05 11:36:08 -06004909 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004910 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004911 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004912 return -EINVAL;
4913
Jens Axboed55e5f52019-12-11 16:12:15 -07004914 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4915 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004916 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004917 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004918 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004919}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004920
Jens Axboe229a7b62020-06-22 10:13:11 -06004921static int io_accept(struct io_kiocb *req, bool force_nonblock,
4922 struct io_comp_state *cs)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004923{
4924 struct io_accept *accept = &req->accept;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004925 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004926 int ret;
4927
Jiufei Xuee697dee2020-06-10 13:41:59 +08004928 if (req->file->f_flags & O_NONBLOCK)
4929 req->flags |= REQ_F_NOWAIT;
4930
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004931 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004932 accept->addr_len, accept->flags,
4933 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004934 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004935 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004936 if (ret < 0) {
4937 if (ret == -ERESTARTSYS)
4938 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004939 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004940 }
Jens Axboe229a7b62020-06-22 10:13:11 -06004941 __io_req_complete(req, ret, 0, cs);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004942 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004943}
4944
Jens Axboe3529d8c2019-12-19 18:24:38 -07004945static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004946{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004947 struct io_connect *conn = &req->connect;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004948 struct io_async_connect *io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004949
Jens Axboe14587a462020-09-05 11:36:08 -06004950 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004951 return -EINVAL;
4952 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4953 return -EINVAL;
4954
Jens Axboe3529d8c2019-12-19 18:24:38 -07004955 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4956 conn->addr_len = READ_ONCE(sqe->addr2);
4957
4958 if (!io)
4959 return 0;
4960
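	/* async context is already allocated: copy the sockaddr in now so a deferred retry doesn't re-read userspace */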
4961 return move_addr_to_kernel(conn->addr, conn->addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004962 &io->address);
Jens Axboef499a022019-12-02 16:28:46 -07004963}
4964
Jens Axboe229a7b62020-06-22 10:13:11 -06004965static int io_connect(struct io_kiocb *req, bool force_nonblock,
4966 struct io_comp_state *cs)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004967{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004968 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004969 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004970 int ret;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004971
Jens Axboee8c2bc12020-08-15 18:44:09 -07004972 if (req->async_data) {
4973 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004974 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004975 ret = move_addr_to_kernel(req->connect.addr,
4976 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004977 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004978 if (ret)
4979 goto out;
4980 io = &__io;
4981 }
4982
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004983 file_flags = force_nonblock ? O_NONBLOCK : 0;
4984
Jens Axboee8c2bc12020-08-15 18:44:09 -07004985 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004986 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004987 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004988 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004989 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004990 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004991 ret = -ENOMEM;
4992 goto out;
4993 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004994 io = req->async_data;
4995 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004996 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004997 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004998 if (ret == -ERESTARTSYS)
4999 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07005000out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005001 if (ret < 0)
5002 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06005003 __io_req_complete(req, ret, 0, cs);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005004 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005005}
YueHaibing469956e2020-03-04 15:53:52 +08005006#else /* !CONFIG_NET */
5007static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5008{
Jens Axboef8e85cf2019-11-23 14:24:24 -07005009 return -EOPNOTSUPP;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005010}
5011
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07005012static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
5013 struct io_comp_state *cs)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005014{
YueHaibing469956e2020-03-04 15:53:52 +08005015 return -EOPNOTSUPP;
5016}
5017
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07005018static int io_send(struct io_kiocb *req, bool force_nonblock,
5019 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08005020{
5021 return -EOPNOTSUPP;
5022}
5023
5024static int io_recvmsg_prep(struct io_kiocb *req,
5025 const struct io_uring_sqe *sqe)
5026{
5027 return -EOPNOTSUPP;
5028}
5029
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07005030static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
5031 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08005032{
5033 return -EOPNOTSUPP;
5034}
5035
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07005036static int io_recv(struct io_kiocb *req, bool force_nonblock,
5037 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08005038{
5039 return -EOPNOTSUPP;
5040}
5041
5042static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5043{
5044 return -EOPNOTSUPP;
5045}
5046
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07005047static int io_accept(struct io_kiocb *req, bool force_nonblock,
5048 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08005049{
5050 return -EOPNOTSUPP;
5051}
5052
5053static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5054{
5055 return -EOPNOTSUPP;
5056}
5057
Randy Dunlap1e16c2f2020-06-26 16:32:50 -07005058static int io_connect(struct io_kiocb *req, bool force_nonblock,
5059 struct io_comp_state *cs)
YueHaibing469956e2020-03-04 15:53:52 +08005060{
5061 return -EOPNOTSUPP;
5062}
5063#endif /* CONFIG_NET */
Jens Axboe2b188cc2019-01-07 10:46:33 -07005064
Jens Axboed7718a92020-02-14 22:23:12 -07005065struct io_poll_table {
5066 struct poll_table_struct pt;
5067 struct io_kiocb *req;
5068 int error;
5069};
5070
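/*
 * Queue completion handling for a woken poll request: if the triggering
 * event matches the mask we care about, detach the wait entry and schedule
 * @func via task_work so it runs in the request's task context. If the task
 * is exiting and task_work can't be added, mark the poll canceled and punt
 * the work to the io-wq task instead.
 */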
Jens Axboed7718a92020-02-14 22:23:12 -07005071static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
5072 __poll_t mask, task_work_func_t func)
5073{
Jens Axboefd7d6de2020-08-23 11:00:37 -06005074 bool twa_signal_ok;
Jens Axboeaa96bf82020-04-03 11:26:26 -06005075 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07005076
5077 /* for instances that support it check for an event match first: */
5078 if (mask && !(mask & poll->events))
5079 return 0;
5080
5081 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5082
5083 list_del_init(&poll->wait.entry);
5084
Jens Axboed7718a92020-02-14 22:23:12 -07005085 req->result = mask;
5086 init_task_work(&req->task_work, func);
Jens Axboe6d816e02020-08-11 08:04:14 -06005087 percpu_ref_get(&req->ctx->refs);
5088
Jens Axboed7718a92020-02-14 22:23:12 -07005089 /*
Jens Axboefd7d6de2020-08-23 11:00:37 -06005090	 * If we are using the signalfd wait_queue_head for this wakeup, then
5091	 * it's not safe to use TWA_SIGNAL as we could be recursing on the
5092	 * tsk->sighand->siglock when doing the wakeup. TWA_SIGNAL should not be
5093	 * needed either, as the normal wakeup will suffice.
5094 */
5095 twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
5096
5097 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06005098 * If this fails, then the task is exiting. When a task exits, the
5099 * work gets canceled, so just cancel this request as well instead
5100 * of executing it. We can't safely execute it anyway, as we may not
5101	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07005102 */
Jens Axboe87c43112020-09-30 21:00:14 -06005103 ret = io_req_task_work_add(req, twa_signal_ok);
Jens Axboeaa96bf82020-04-03 11:26:26 -06005104 if (unlikely(ret)) {
Jens Axboec2c4c832020-07-01 15:37:11 -06005105 struct task_struct *tsk;
5106
Jens Axboee3aabf92020-05-18 11:04:17 -06005107 WRITE_ONCE(poll->canceled, true);
Jens Axboeaa96bf82020-04-03 11:26:26 -06005108 tsk = io_wq_get_task(req->ctx->io_wq);
Jens Axboe91989c72020-10-16 09:02:26 -06005109 task_work_add(tsk, &req->task_work, TWA_NONE);
Jens Axboece593a62020-06-30 12:39:05 -06005110 wake_up_process(tsk);
Jens Axboeaa96bf82020-04-03 11:26:26 -06005111 }
Jens Axboed7718a92020-02-14 22:23:12 -07005112 return 1;
5113}
5114
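/*
 * Re-check the poll state with ->completion_lock held: if there is still no
 * result and the poll hasn't been canceled, re-arm the wait entry and return
 * true so the caller can bail out and wait for the next wakeup.
 */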
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005115static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
5116 __acquires(&req->ctx->completion_lock)
5117{
5118 struct io_ring_ctx *ctx = req->ctx;
5119
5120 if (!req->result && !READ_ONCE(poll->canceled)) {
5121 struct poll_table_struct pt = { ._key = poll->events };
5122
5123 req->result = vfs_poll(req->file, &pt) & poll->events;
5124 }
5125
5126 spin_lock_irq(&ctx->completion_lock);
5127 if (!req->result && !READ_ONCE(poll->canceled)) {
5128 add_wait_queue(poll->head, &poll->wait);
5129 return true;
5130 }
5131
5132 return false;
5133}
5134
Jens Axboed4e7cd32020-08-15 11:44:50 -07005135static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06005136{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005137 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07005138 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07005139 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07005140 return req->apoll->double_poll;
5141}
5142
5143static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5144{
5145 if (req->opcode == IORING_OP_POLL_ADD)
5146 return &req->poll;
5147 return &req->apoll->poll;
5148}
5149
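/*
 * Detach the second (double) poll entry, if one was set up, from its
 * waitqueue and drop the request reference it held.
 */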
5150static void io_poll_remove_double(struct io_kiocb *req)
5151{
5152 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005153
5154 lockdep_assert_held(&req->ctx->completion_lock);
5155
5156 if (poll && poll->head) {
5157 struct wait_queue_head *head = poll->head;
5158
5159 spin_lock(&head->lock);
5160 list_del_init(&poll->wait.entry);
5161 if (poll->wait.private)
5162 refcount_dec(&req->refs);
5163 poll->head = NULL;
5164 spin_unlock(&head->lock);
5165 }
5166}
5167
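/*
 * Post the completion for a poll request: fill the CQE with the mangled
 * poll mask (or the error) and commit the CQ ring. Caller must hold
 * ->completion_lock.
 */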
5168static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
5169{
5170 struct io_ring_ctx *ctx = req->ctx;
5171
Jens Axboed4e7cd32020-08-15 11:44:50 -07005172 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005173 req->poll.done = true;
5174 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
5175 io_commit_cqring(ctx);
5176}
5177
Jens Axboe18bceab2020-05-15 11:56:54 -06005178static void io_poll_task_func(struct callback_head *cb)
5179{
5180 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06005181 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005182 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06005183
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005184 if (io_poll_rewait(req, &req->poll)) {
5185 spin_unlock_irq(&ctx->completion_lock);
5186 } else {
5187 hash_del(&req->hash_node);
5188 io_poll_complete(req, req->result, 0);
5189 spin_unlock_irq(&ctx->completion_lock);
5190
5191 nxt = io_put_req_find_next(req);
5192 io_cqring_ev_posted(ctx);
5193 if (nxt)
5194 __io_req_task_submit(nxt);
5195 }
5196
Jens Axboe6d816e02020-08-11 08:04:14 -06005197 percpu_ref_put(&ctx->refs);
Jens Axboe18bceab2020-05-15 11:56:54 -06005198}
5199
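/*
 * Wake callback for the second poll entry: forward the wakeup to the primary
 * entry's wait function so the right handler runs, then drop the extra
 * request reference taken when the double entry was created.
 */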
5200static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5201 int sync, void *key)
5202{
5203 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07005204 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005205 __poll_t mask = key_to_poll(key);
5206
5207 /* for instances that support it check for an event match first: */
5208 if (mask && !(mask & poll->events))
5209 return 0;
5210
Jens Axboe8706e042020-09-28 08:38:54 -06005211 list_del_init(&wait->entry);
5212
Jens Axboe807abcb2020-07-17 17:09:27 -06005213 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005214 bool done;
5215
Jens Axboe807abcb2020-07-17 17:09:27 -06005216 spin_lock(&poll->head->lock);
5217 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06005218 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06005219 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005220 /* make sure double remove sees this as being gone */
5221 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06005222 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06005223 if (!done) {
5224 /* use wait func handler, so it matches the rq type */
5225 poll->wait.func(&poll->wait, mode, sync, key);
5226 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005227 }
5228 refcount_dec(&req->refs);
5229 return 1;
5230}
5231
5232static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5233 wait_queue_func_t wake_func)
5234{
5235 poll->head = NULL;
5236 poll->done = false;
5237 poll->canceled = false;
5238 poll->events = events;
5239 INIT_LIST_HEAD(&poll->wait.entry);
5240 init_waitqueue_func_entry(&poll->wait, wake_func);
5241}
5242
5243static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005244 struct wait_queue_head *head,
5245 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005246{
5247 struct io_kiocb *req = pt->req;
5248
5249 /*
5250 * If poll->head is already set, it's because the file being polled
5251 * uses multiple waitqueues for poll handling (eg one for read, one
5252 * for write). Setup a separate io_poll_iocb if this happens.
5253 */
5254 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005255 struct io_poll_iocb *poll_one = poll;
5256
Jens Axboe18bceab2020-05-15 11:56:54 -06005257 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005258 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005259 pt->error = -EINVAL;
5260 return;
5261 }
5262 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5263 if (!poll) {
5264 pt->error = -ENOMEM;
5265 return;
5266 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005267 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboe18bceab2020-05-15 11:56:54 -06005268 refcount_inc(&req->refs);
5269 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005270 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005271 }
5272
5273 pt->error = 0;
5274 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005275
5276 if (poll->events & EPOLLEXCLUSIVE)
5277 add_wait_queue_exclusive(head, &poll->wait);
5278 else
5279 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005280}
5281
5282static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5283 struct poll_table_struct *p)
5284{
5285 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005286 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005287
Jens Axboe807abcb2020-07-17 17:09:27 -06005288 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005289}
5290
Jens Axboed7718a92020-02-14 22:23:12 -07005291static void io_async_task_func(struct callback_head *cb)
5292{
5293 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5294 struct async_poll *apoll = req->apoll;
5295 struct io_ring_ctx *ctx = req->ctx;
5296
5297 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5298
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005299 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005300 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe6d816e02020-08-11 08:04:14 -06005301 percpu_ref_put(&ctx->refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005302 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005303 }
5304
Jens Axboe31067252020-05-17 17:43:31 -06005305 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005306 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005307 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005308
Jens Axboed4e7cd32020-08-15 11:44:50 -07005309 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005310 spin_unlock_irq(&ctx->completion_lock);
5311
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005312 if (!READ_ONCE(apoll->poll.canceled))
5313 __io_req_task_submit(req);
5314 else
5315 __io_req_task_cancel(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005316
Jens Axboe6d816e02020-08-11 08:04:14 -06005317 percpu_ref_put(&ctx->refs);
Jens Axboe807abcb2020-07-17 17:09:27 -06005318 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005319 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005320}
5321
5322static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5323 void *key)
5324{
5325 struct io_kiocb *req = wait->private;
5326 struct io_poll_iocb *poll = &req->apoll->poll;
5327
5328 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5329 key_to_poll(key));
5330
5331 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5332}
5333
5334static void io_poll_req_insert(struct io_kiocb *req)
5335{
5336 struct io_ring_ctx *ctx = req->ctx;
5337 struct hlist_head *list;
5338
5339 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5340 hlist_add_head(&req->hash_node, list);
5341}
5342
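/*
 * Arm @poll on the request's file: issue vfs_poll() with our queueing proc
 * and, if no event is ready yet, leave the request hashed so a later wakeup
 * can find it. Returns the ready mask (0 if we are now waiting) with
 * ->completion_lock held.
 */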
5343static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5344 struct io_poll_iocb *poll,
5345 struct io_poll_table *ipt, __poll_t mask,
5346 wait_queue_func_t wake_func)
5347 __acquires(&ctx->completion_lock)
5348{
5349 struct io_ring_ctx *ctx = req->ctx;
5350 bool cancel = false;
5351
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005352 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005353 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005354 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005355 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005356
5357 ipt->pt._key = mask;
5358 ipt->req = req;
5359 ipt->error = -EINVAL;
5360
Jens Axboed7718a92020-02-14 22:23:12 -07005361 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5362
5363 spin_lock_irq(&ctx->completion_lock);
5364 if (likely(poll->head)) {
5365 spin_lock(&poll->head->lock);
5366 if (unlikely(list_empty(&poll->wait.entry))) {
5367 if (ipt->error)
5368 cancel = true;
5369 ipt->error = 0;
5370 mask = 0;
5371 }
5372 if (mask || ipt->error)
5373 list_del_init(&poll->wait.entry);
5374 else if (cancel)
5375 WRITE_ONCE(poll->canceled, true);
5376 else if (!poll->done) /* actually waiting for an event */
5377 io_poll_req_insert(req);
5378 spin_unlock(&poll->head->lock);
5379 }
5380
5381 return mask;
5382}
5383
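/*
 * Arm an internal poll handler (req->apoll) for a request whose file can't
 * make progress right now: once the file signals readiness, io_async_task_func
 * re-issues the request from task_work context. Returns true if armed.
 */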
5384static bool io_arm_poll_handler(struct io_kiocb *req)
5385{
5386 const struct io_op_def *def = &io_op_defs[req->opcode];
5387 struct io_ring_ctx *ctx = req->ctx;
5388 struct async_poll *apoll;
5389 struct io_poll_table ipt;
5390 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005391 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005392
5393 if (!req->file || !file_can_poll(req->file))
5394 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005395 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005396 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005397 if (def->pollin)
5398 rw = READ;
5399 else if (def->pollout)
5400 rw = WRITE;
5401 else
5402 return false;
5403	 /* if we can't do a non-blocking attempt, no point in arming a poll handler */
5404 if (!io_file_supports_async(req->file, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005405 return false;
5406
5407 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5408 if (unlikely(!apoll))
5409 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005410 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005411
5412 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005413 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005414
Nathan Chancellor8755d972020-03-02 16:01:19 -07005415 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005416 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005417 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005418 if (def->pollout)
5419 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005420
5421 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5422 if ((req->opcode == IORING_OP_RECVMSG) &&
5423 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5424 mask &= ~POLLIN;
5425
Jens Axboed7718a92020-02-14 22:23:12 -07005426 mask |= POLLERR | POLLPRI;
5427
5428 ipt.pt._qproc = io_async_queue_proc;
5429
5430 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5431 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005432 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005433 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005434 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005435 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005436 kfree(apoll);
5437 return false;
5438 }
5439 spin_unlock_irq(&ctx->completion_lock);
5440 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5441 apoll->poll.events);
5442 return true;
5443}
5444
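/*
 * Mark @poll canceled and detach its wait entry; returns true if the entry
 * was still queued, meaning the caller should post a cancellation CQE.
 */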
5445static bool __io_poll_remove_one(struct io_kiocb *req,
5446 struct io_poll_iocb *poll)
5447{
Jens Axboeb41e9852020-02-17 09:52:41 -07005448 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005449
5450 spin_lock(&poll->head->lock);
5451 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005452 if (!list_empty(&poll->wait.entry)) {
5453 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005454 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005455 }
5456 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005457 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005458 return do_complete;
5459}
5460
5461static bool io_poll_remove_one(struct io_kiocb *req)
5462{
5463 bool do_complete;
5464
Jens Axboed4e7cd32020-08-15 11:44:50 -07005465 io_poll_remove_double(req);
5466
Jens Axboed7718a92020-02-14 22:23:12 -07005467 if (req->opcode == IORING_OP_POLL_ADD) {
5468 do_complete = __io_poll_remove_one(req, &req->poll);
5469 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005470 struct async_poll *apoll = req->apoll;
5471
Jens Axboed7718a92020-02-14 22:23:12 -07005472 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005473 do_complete = __io_poll_remove_one(req, &apoll->poll);
5474 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005475 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005476 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005477 kfree(apoll);
5478 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005479 }
5480
Jens Axboeb41e9852020-02-17 09:52:41 -07005481 if (do_complete) {
5482 io_cqring_fill_event(req, -ECANCELED);
5483 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005484 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005485 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005486 }
5487
5488 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005489}
5490
Jens Axboe76e1b642020-09-26 15:05:03 -06005491/*
5492 * Returns true if we found and killed one or more poll requests
5493 */
5494static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005495{
Jens Axboe78076bb2019-12-04 19:56:40 -07005496 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005497 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005498 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005499
5500 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005501 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5502 struct hlist_head *list;
5503
5504 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005505 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5506 if (io_task_match(req, tsk))
5507 posted += io_poll_remove_one(req);
5508 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005509 }
5510 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005511
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005512 if (posted)
5513 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005514
5515 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005516}
5517
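/*
 * Cancel a hashed poll request whose user_data matches @sqe_addr. Returns 0
 * on success, -EALREADY if it could not be fully removed, and -ENOENT if no
 * matching request was found.
 */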
Jens Axboe47f46762019-11-09 17:43:02 -07005518static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5519{
Jens Axboe78076bb2019-12-04 19:56:40 -07005520 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005521 struct io_kiocb *req;
5522
Jens Axboe78076bb2019-12-04 19:56:40 -07005523 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5524 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005525 if (sqe_addr != req->user_data)
5526 continue;
5527 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005528 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005529 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005530 }
5531
5532 return -ENOENT;
5533}
5534
Jens Axboe3529d8c2019-12-19 18:24:38 -07005535static int io_poll_remove_prep(struct io_kiocb *req,
5536 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005537{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005538 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5539 return -EINVAL;
5540 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5541 sqe->poll_events)
5542 return -EINVAL;
5543
Pavel Begunkov018043b2020-10-27 23:17:18 +00005544 req->poll_remove.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005545 return 0;
5546}
5547
5548/*
5549 * Find a running poll command that matches one specified in sqe->addr,
5550 * and remove it if found.
5551 */
5552static int io_poll_remove(struct io_kiocb *req)
5553{
5554 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0969e782019-12-17 18:40:57 -07005555 int ret;
5556
Jens Axboe221c5eb2019-01-17 09:41:58 -07005557 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov018043b2020-10-27 23:17:18 +00005558 ret = io_poll_cancel(ctx, req->poll_remove.addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005559 spin_unlock_irq(&ctx->completion_lock);
5560
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005561 if (ret < 0)
5562 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005563 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005564 return 0;
5565}
5566
Jens Axboe221c5eb2019-01-17 09:41:58 -07005567static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5568 void *key)
5569{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005570 struct io_kiocb *req = wait->private;
5571 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005572
Jens Axboed7718a92020-02-14 22:23:12 -07005573 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005574}
5575
Jens Axboe221c5eb2019-01-17 09:41:58 -07005576static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5577 struct poll_table_struct *p)
5578{
5579 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5580
Jens Axboee8c2bc12020-08-15 18:44:09 -07005581 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005582}
5583
Jens Axboe3529d8c2019-12-19 18:24:38 -07005584static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005585{
5586 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005587 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005588
5589 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5590 return -EINVAL;
5591 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5592 return -EINVAL;
5593
Jiufei Xue5769a352020-06-17 17:53:55 +08005594 events = READ_ONCE(sqe->poll32_events);
5595#ifdef __BIG_ENDIAN
5596 events = swahw32(events);
5597#endif
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005598 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5599 (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005600 return 0;
5601}
5602
Pavel Begunkov014db002020-03-03 21:33:12 +03005603static int io_poll_add(struct io_kiocb *req)
Jens Axboe0969e782019-12-17 18:40:57 -07005604{
5605 struct io_poll_iocb *poll = &req->poll;
5606 struct io_ring_ctx *ctx = req->ctx;
5607 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005608 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005609
Jens Axboed7718a92020-02-14 22:23:12 -07005610 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005611
Jens Axboed7718a92020-02-14 22:23:12 -07005612 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5613 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005614
Jens Axboe8c838782019-03-12 15:48:16 -06005615 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005616 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005617 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005618 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005619 spin_unlock_irq(&ctx->completion_lock);
5620
Jens Axboe8c838782019-03-12 15:48:16 -06005621 if (mask) {
5622 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005623 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005624 }
Jens Axboe8c838782019-03-12 15:48:16 -06005625 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005626}
5627
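/*
 * hrtimer callback for a timeout request: take it off the timeout list,
 * post an -ETIME completion and drop the request.
 */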
Jens Axboe5262f562019-09-17 12:26:57 -06005628static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5629{
Jens Axboead8a48a2019-11-15 08:49:11 -07005630 struct io_timeout_data *data = container_of(timer,
5631 struct io_timeout_data, timer);
5632 struct io_kiocb *req = data->req;
5633 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005634 unsigned long flags;
5635
Jens Axboe5262f562019-09-17 12:26:57 -06005636 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005637 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005638 atomic_set(&req->ctx->cq_timeouts,
5639 atomic_read(&req->ctx->cq_timeouts) + 1);
5640
Jens Axboe78e19bb2019-11-06 15:21:34 -07005641 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005642 io_commit_cqring(ctx);
5643 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5644
5645 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005646 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005647 io_put_req(req);
5648 return HRTIMER_NORESTART;
5649}
5650
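/*
 * Cancel an armed timeout: if the hrtimer can still be stopped, unlink the
 * request and post an -ECANCELED completion; returns -EALREADY if the timer
 * is already firing.
 */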
Jens Axboef254ac02020-08-12 17:33:30 -06005651static int __io_timeout_cancel(struct io_kiocb *req)
Jens Axboe47f46762019-11-09 17:43:02 -07005652{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005653 struct io_timeout_data *io = req->async_data;
Jens Axboef254ac02020-08-12 17:33:30 -06005654 int ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005655
Jens Axboee8c2bc12020-08-15 18:44:09 -07005656 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe47f46762019-11-09 17:43:02 -07005657 if (ret == -1)
5658 return -EALREADY;
Pavel Begunkova71976f2020-10-10 18:34:11 +01005659 list_del_init(&req->timeout.list);
Jens Axboe47f46762019-11-09 17:43:02 -07005660
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005661 req_set_fail_links(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005662 io_cqring_fill_event(req, -ECANCELED);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005663 io_put_req_deferred(req, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07005664 return 0;
5665}
5666
Jens Axboef254ac02020-08-12 17:33:30 -06005667static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5668{
5669 struct io_kiocb *req;
5670 int ret = -ENOENT;
5671
5672 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5673 if (user_data == req->user_data) {
5674 ret = 0;
5675 break;
5676 }
5677 }
5678
5679 if (ret == -ENOENT)
5680 return ret;
5681
5682 return __io_timeout_cancel(req);
5683}
5684
Jens Axboe3529d8c2019-12-19 18:24:38 -07005685static int io_timeout_remove_prep(struct io_kiocb *req,
5686 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005687{
Jens Axboeb29472e2019-12-17 18:50:29 -07005688 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5689 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005690 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5691 return -EINVAL;
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +01005692 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
Jens Axboeb29472e2019-12-17 18:50:29 -07005693 return -EINVAL;
5694
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +01005695 req->timeout_rem.addr = READ_ONCE(sqe->addr);
Jens Axboeb29472e2019-12-17 18:50:29 -07005696 return 0;
5697}
5698
Jens Axboe11365042019-10-16 09:08:32 -06005699/*
5700 * Remove an existing timeout command, if found
5701 */
Jens Axboefc4df992019-12-10 14:38:45 -07005702static int io_timeout_remove(struct io_kiocb *req)
Jens Axboe11365042019-10-16 09:08:32 -06005703{
5704 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005705 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005706
Jens Axboe11365042019-10-16 09:08:32 -06005707 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +01005708 ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
Jens Axboe11365042019-10-16 09:08:32 -06005709
Jens Axboe47f46762019-11-09 17:43:02 -07005710 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005711 io_commit_cqring(ctx);
5712 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005713 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005714 if (ret < 0)
5715 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005716 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005717 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005718}
5719
Jens Axboe3529d8c2019-12-19 18:24:38 -07005720static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005721 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005722{
Jens Axboead8a48a2019-11-15 08:49:11 -07005723 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005724 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005725 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005726
Jens Axboead8a48a2019-11-15 08:49:11 -07005727 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005728 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005729 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005730 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005731 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005732 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005733 flags = READ_ONCE(sqe->timeout_flags);
5734 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005735 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005736
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005737 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005738
Jens Axboee8c2bc12020-08-15 18:44:09 -07005739 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005740 return -ENOMEM;
5741
Jens Axboee8c2bc12020-08-15 18:44:09 -07005742 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005743 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005744
5745 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005746 return -EFAULT;
5747
Jens Axboe11365042019-10-16 09:08:32 -06005748 if (flags & IORING_TIMEOUT_ABS)
Jens Axboead8a48a2019-11-15 08:49:11 -07005749 data->mode = HRTIMER_MODE_ABS;
Jens Axboe11365042019-10-16 09:08:32 -06005750 else
Jens Axboead8a48a2019-11-15 08:49:11 -07005751 data->mode = HRTIMER_MODE_REL;
Jens Axboe11365042019-10-16 09:08:32 -06005752
Jens Axboead8a48a2019-11-15 08:49:11 -07005753 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5754 return 0;
5755}
5756
Jens Axboefc4df992019-12-10 14:38:45 -07005757static int io_timeout(struct io_kiocb *req)
Jens Axboead8a48a2019-11-15 08:49:11 -07005758{
Jens Axboead8a48a2019-11-15 08:49:11 -07005759 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005760 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005761 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005762 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005763
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005764 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005765
Jens Axboe5262f562019-09-17 12:26:57 -06005766 /*
5767	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005768	 * timeout event to be satisfied. If it isn't set, then this is
5769	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005770 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005771 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005772 entry = ctx->timeout_list.prev;
5773 goto add;
5774 }
Jens Axboe5262f562019-09-17 12:26:57 -06005775
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005776 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5777 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005778
5779 /*
5780 * Insertion sort, ensuring the first entry in the list is always
5781 * the one we need first.
5782 */
Jens Axboe5262f562019-09-17 12:26:57 -06005783 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005784 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5785 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005786
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005787 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005788 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005789 /* nxt.seq is behind @tail, otherwise would've been completed */
5790 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005791 break;
5792 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005793add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005794 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005795 data->timer.function = io_timeout_fn;
5796 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005797 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005798 return 0;
5799}
5800
Jens Axboe62755e32019-10-28 21:49:21 -06005801static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005802{
Jens Axboe62755e32019-10-28 21:49:21 -06005803 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboede0617e2019-04-06 21:51:27 -06005804
Jens Axboe62755e32019-10-28 21:49:21 -06005805 return req->user_data == (unsigned long) data;
5806}
5807
Jens Axboee977d6d2019-11-05 12:39:45 -07005808static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
Jens Axboe62755e32019-10-28 21:49:21 -06005809{
Jens Axboe62755e32019-10-28 21:49:21 -06005810 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005811 int ret = 0;
5812
Pavel Begunkov4f26bda2020-06-15 10:24:03 +03005813 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005814 switch (cancel_ret) {
5815 case IO_WQ_CANCEL_OK:
5816 ret = 0;
5817 break;
5818 case IO_WQ_CANCEL_RUNNING:
5819 ret = -EALREADY;
5820 break;
5821 case IO_WQ_CANCEL_NOTFOUND:
5822 ret = -ENOENT;
5823 break;
5824 }
5825
Jens Axboee977d6d2019-11-05 12:39:45 -07005826 return ret;
5827}
5828
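/*
 * Try the cancellation paths in order: io-wq work, then timeouts, then poll
 * requests. The result (or @success_ret when cancellation succeeded) is
 * posted as this request's completion.
 */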
Jens Axboe47f46762019-11-09 17:43:02 -07005829static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5830 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005831 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005832{
5833 unsigned long flags;
5834 int ret;
5835
5836 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5837 if (ret != -ENOENT) {
5838 spin_lock_irqsave(&ctx->completion_lock, flags);
5839 goto done;
5840 }
5841
5842 spin_lock_irqsave(&ctx->completion_lock, flags);
5843 ret = io_timeout_cancel(ctx, sqe_addr);
5844 if (ret != -ENOENT)
5845 goto done;
5846 ret = io_poll_cancel(ctx, sqe_addr);
5847done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005848 if (!ret)
5849 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005850 io_cqring_fill_event(req, ret);
5851 io_commit_cqring(ctx);
5852 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5853 io_cqring_ev_posted(ctx);
5854
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005855 if (ret < 0)
5856 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005857 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005858}
5859
Jens Axboe3529d8c2019-12-19 18:24:38 -07005860static int io_async_cancel_prep(struct io_kiocb *req,
5861 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005862{
Jens Axboefbf23842019-12-17 18:45:56 -07005863 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005864 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005865 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5866 return -EINVAL;
5867 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005868 return -EINVAL;
5869
Jens Axboefbf23842019-12-17 18:45:56 -07005870 req->cancel.addr = READ_ONCE(sqe->addr);
5871 return 0;
5872}
5873
Pavel Begunkov014db002020-03-03 21:33:12 +03005874static int io_async_cancel(struct io_kiocb *req)
Jens Axboefbf23842019-12-17 18:45:56 -07005875{
5876 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefbf23842019-12-17 18:45:56 -07005877
Pavel Begunkov014db002020-03-03 21:33:12 +03005878 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06005879 return 0;
5880}
5881
Jens Axboe05f3fb32019-12-09 11:22:50 -07005882static int io_files_update_prep(struct io_kiocb *req,
5883 const struct io_uring_sqe *sqe)
5884{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005885 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5886 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005887 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5888 return -EINVAL;
5889 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005890 return -EINVAL;
5891
5892 req->files_update.offset = READ_ONCE(sqe->off);
5893 req->files_update.nr_args = READ_ONCE(sqe->len);
5894 if (!req->files_update.nr_args)
5895 return -EINVAL;
5896 req->files_update.arg = READ_ONCE(sqe->addr);
5897 return 0;
5898}
5899
Jens Axboe229a7b62020-06-22 10:13:11 -06005900static int io_files_update(struct io_kiocb *req, bool force_nonblock,
5901 struct io_comp_state *cs)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005902{
5903 struct io_ring_ctx *ctx = req->ctx;
5904 struct io_uring_files_update up;
5905 int ret;
5906
Jens Axboef86cd202020-01-29 13:46:44 -07005907 if (force_nonblock)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005908 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005909
5910 up.offset = req->files_update.offset;
5911 up.fds = req->files_update.arg;
5912
5913 mutex_lock(&ctx->uring_lock);
5914 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
5915 mutex_unlock(&ctx->uring_lock);
5916
5917 if (ret < 0)
5918 req_set_fail_links(req);
Jens Axboe229a7b62020-06-22 10:13:11 -06005919 __io_req_complete(req, ret, 0, cs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005920 return 0;
5921}
5922
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005923static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005924{
Jens Axboed625c6e2019-12-17 19:53:05 -07005925 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005926 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005927 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005928 case IORING_OP_READV:
5929 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005930 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005931 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005932 case IORING_OP_WRITEV:
5933 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005934 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005935 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005936 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005937 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005938 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005939 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005940 case IORING_OP_FSYNC:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005941 return io_prep_fsync(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005942 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005943 return io_prep_sfr(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005944 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005945 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005946 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005947 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005948 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005949 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005950 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005951 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005952 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005953 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005954 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005955 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005956 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005957 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005958 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005959 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005960 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005961 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005962 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005963 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005964 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005965 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005966 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005967 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005968 case IORING_OP_FILES_UPDATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005969 return io_files_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005970 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005971 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005972 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005973 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005974 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005975 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005976 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005977 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005978 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005979 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005980 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005981 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005982 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005983 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005984 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005985 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005986 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005987 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005988 case IORING_OP_SHUTDOWN:
5989 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005990 case IORING_OP_RENAMEAT:
5991 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005992 case IORING_OP_UNLINKAT:
5993 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005994 }
5995
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005996 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5997 req->opcode);
5998	return -EINVAL;
5999}
6000
Jens Axboedef596e2019-01-09 08:59:42 -07006001static int io_req_defer_prep(struct io_kiocb *req,
6002 const struct io_uring_sqe *sqe)
Jens Axboedef596e2019-01-09 08:59:42 -07006003{
Jens Axboedef596e2019-01-09 08:59:42 -07006004 if (!sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006005 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006006 if (io_alloc_async_data(req))
Jens Axboeb76da702019-11-20 13:05:32 -07006007 return -EAGAIN;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006008 return io_req_prep(req, sqe);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006009}
6010
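/*
 * Sequence number used for drain accounting: submissions seen so far minus
 * this request and anything linked behind it.
 */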
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006011static u32 io_get_sequence(struct io_kiocb *req)
6012{
6013 struct io_kiocb *pos;
6014 struct io_ring_ctx *ctx = req->ctx;
6015 u32 total_submitted, nr_reqs = 1;
6016
6017 if (req->flags & REQ_F_LINK_HEAD)
6018 list_for_each_entry(pos, &req->link_list, link_list)
6019 nr_reqs++;
6020
6021 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
6022 return total_submitted - nr_reqs;
6023}
6024
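/*
 * Handle IO_DRAIN-style deferral: if the defer list is non-empty or this
 * request has REQ_F_IO_DRAIN set and its sequence hasn't been reached yet,
 * prep the request, stash it on ->defer_list and return -EIOCBQUEUED;
 * otherwise return 0 so it can be issued now.
 */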
Jens Axboe3529d8c2019-12-19 18:24:38 -07006025static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006026{
6027 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006028 struct io_defer_entry *de;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006029 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006030 u32 seq;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006031
6032	 /* Still need to defer if there are pending requests in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006033 if (likely(list_empty_careful(&ctx->defer_list) &&
6034 !(req->flags & REQ_F_IO_DRAIN)))
6035 return 0;
6036
6037 seq = io_get_sequence(req);
6038 /* Still a chance to pass the sequence check */
6039 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboe2b188cc2019-01-07 10:46:33 -07006040 return 0;
6041
Jens Axboee8c2bc12020-08-15 18:44:09 -07006042 if (!req->async_data) {
Pavel Begunkov650b5482020-05-17 14:02:11 +03006043 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006044 if (ret)
Pavel Begunkov650b5482020-05-17 14:02:11 +03006045 return ret;
6046 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006047 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006048 de = kmalloc(sizeof(*de), GFP_KERNEL);
6049 if (!de)
6050 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07006051
6052 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006053 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07006054 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006055 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03006056 io_queue_async_work(req);
6057 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07006058 }
6059
6060 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006061 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006062 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006063 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07006064 spin_unlock_irq(&ctx->completion_lock);
6065 return -EIOCBQUEUED;
6066}
Jens Axboeedafcce2019-01-09 09:16:05 -07006067
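/*
 * Drop the files/nsproxy references tracked for an inflight request, take it
 * off the inflight list and wake anyone waiting for that list to drain.
 */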
Jens Axboef573d382020-09-22 10:19:24 -06006068static void io_req_drop_files(struct io_kiocb *req)
6069{
6070 struct io_ring_ctx *ctx = req->ctx;
6071 unsigned long flags;
6072
6073 spin_lock_irqsave(&ctx->inflight_lock, flags);
6074 list_del(&req->inflight_entry);
6075 if (waitqueue_active(&ctx->inflight_wait))
6076 wake_up(&ctx->inflight_wait);
6077 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
6078 req->flags &= ~REQ_F_INFLIGHT;
Jens Axboe98447d62020-10-14 10:48:51 -06006079 put_files_struct(req->work.identity->files);
6080 put_nsproxy(req->work.identity->nsproxy);
Jens Axboedfead8a2020-10-14 10:12:37 -06006081 req->work.flags &= ~IO_WQ_WORK_FILES;
Jens Axboef573d382020-09-22 10:19:24 -06006082}
6083
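/*
 * Per-opcode cleanup: free selected buffers and async iovec/msghdr state,
 * release spliced files and opened filenames, and drop inflight file refs.
 */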
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03006084static void __io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006085{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006086 if (req->flags & REQ_F_BUFFER_SELECTED) {
6087 switch (req->opcode) {
6088 case IORING_OP_READV:
6089 case IORING_OP_READ_FIXED:
6090 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006091 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006092 break;
6093 case IORING_OP_RECVMSG:
6094 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006095 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006096 break;
6097 }
6098 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006099 }
6100
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006101 if (req->flags & REQ_F_NEED_CLEANUP) {
6102 switch (req->opcode) {
6103 case IORING_OP_READV:
6104 case IORING_OP_READ_FIXED:
6105 case IORING_OP_READ:
6106 case IORING_OP_WRITEV:
6107 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006108 case IORING_OP_WRITE: {
6109 struct io_async_rw *io = req->async_data;
6110 if (io->free_iovec)
6111 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006112 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006113 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006114 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006115 case IORING_OP_SENDMSG: {
6116 struct io_async_msghdr *io = req->async_data;
6117 if (io->iov != io->fast_iov)
6118 kfree(io->iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006119 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006120 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006121 case IORING_OP_SPLICE:
6122 case IORING_OP_TEE:
6123 io_put_file(req, req->splice.file_in,
6124 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
6125 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006126 case IORING_OP_OPENAT:
6127 case IORING_OP_OPENAT2:
6128 if (req->open.filename)
6129 putname(req->open.filename);
6130 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006131 case IORING_OP_RENAMEAT:
6132 putname(req->rename.oldpath);
6133 putname(req->rename.newpath);
6134 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006135 case IORING_OP_UNLINKAT:
6136 putname(req->unlink.filename);
6137 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006138 }
6139 req->flags &= ~REQ_F_NEED_CLEANUP;
6140 }
Pavel Begunkovbb175342020-08-20 11:33:35 +03006141
Jens Axboef573d382020-09-22 10:19:24 -06006142 if (req->flags & REQ_F_INFLIGHT)
6143 io_req_drop_files(req);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006144}
6145
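/*
 * Dispatch a request to its opcode handler, respecting @force_nonblock
 * where the handler supports it.
 */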
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006146static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
6147 struct io_comp_state *cs)
Jens Axboeedafcce2019-01-09 09:16:05 -07006148{
Jens Axboeedafcce2019-01-09 09:16:05 -07006149 struct io_ring_ctx *ctx = req->ctx;
Jens Axboed625c6e2019-12-17 19:53:05 -07006150 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006151
Jens Axboed625c6e2019-12-17 19:53:05 -07006152 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006153 case IORING_OP_NOP:
Jens Axboe229a7b62020-06-22 10:13:11 -06006154 ret = io_nop(req, cs);
Jens Axboe31b51512019-01-18 22:56:34 -07006155 break;
6156 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006157 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006158 case IORING_OP_READ:
Jens Axboea1d7c392020-06-22 11:09:46 -06006159 ret = io_read(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006160 break;
6161 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006162 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006163 case IORING_OP_WRITE:
Jens Axboea1d7c392020-06-22 11:09:46 -06006164 ret = io_write(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006165 break;
6166 case IORING_OP_FSYNC:
Pavel Begunkov014db002020-03-03 21:33:12 +03006167 ret = io_fsync(req, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006168 break;
6169 case IORING_OP_POLL_ADD:
Pavel Begunkov014db002020-03-03 21:33:12 +03006170 ret = io_poll_add(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006171 break;
6172 case IORING_OP_POLL_REMOVE:
Jens Axboeb76da702019-11-20 13:05:32 -07006173 ret = io_poll_remove(req);
6174 break;
6175 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov014db002020-03-03 21:33:12 +03006176 ret = io_sync_file_range(req, force_nonblock);
Jens Axboeb76da702019-11-20 13:05:32 -07006177 break;
6178 case IORING_OP_SENDMSG:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006179 ret = io_sendmsg(req, force_nonblock, cs);
6180 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006181 case IORING_OP_SEND:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006182 ret = io_send(req, force_nonblock, cs);
Jens Axboeb76da702019-11-20 13:05:32 -07006183 break;
6184 case IORING_OP_RECVMSG:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006185 ret = io_recvmsg(req, force_nonblock, cs);
6186 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006187 case IORING_OP_RECV:
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006188 ret = io_recv(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006189 break;
6190 case IORING_OP_TIMEOUT:
6191 ret = io_timeout(req);
6192 break;
6193 case IORING_OP_TIMEOUT_REMOVE:
6194 ret = io_timeout_remove(req);
6195 break;
6196 case IORING_OP_ACCEPT:
Jens Axboe229a7b62020-06-22 10:13:11 -06006197 ret = io_accept(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006198 break;
6199 case IORING_OP_CONNECT:
Jens Axboe229a7b62020-06-22 10:13:11 -06006200 ret = io_connect(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006201 break;
6202 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov014db002020-03-03 21:33:12 +03006203 ret = io_async_cancel(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006204 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006205 case IORING_OP_FALLOCATE:
Pavel Begunkov014db002020-03-03 21:33:12 +03006206 ret = io_fallocate(req, force_nonblock);
Jens Axboed63d1b52019-12-10 10:38:56 -07006207 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006208 case IORING_OP_OPENAT:
Pavel Begunkov014db002020-03-03 21:33:12 +03006209 ret = io_openat(req, force_nonblock);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006210 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006211 case IORING_OP_CLOSE:
Jens Axboe229a7b62020-06-22 10:13:11 -06006212 ret = io_close(req, force_nonblock, cs);
Jens Axboeb5dba592019-12-11 14:02:38 -07006213 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006214 case IORING_OP_FILES_UPDATE:
Jens Axboe229a7b62020-06-22 10:13:11 -06006215 ret = io_files_update(req, force_nonblock, cs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006216 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006217 case IORING_OP_STATX:
Pavel Begunkov014db002020-03-03 21:33:12 +03006218 ret = io_statx(req, force_nonblock);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006219 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006220 case IORING_OP_FADVISE:
Pavel Begunkov014db002020-03-03 21:33:12 +03006221 ret = io_fadvise(req, force_nonblock);
Jens Axboe4840e412019-12-25 22:03:45 -07006222 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006223 case IORING_OP_MADVISE:
Pavel Begunkov014db002020-03-03 21:33:12 +03006224 ret = io_madvise(req, force_nonblock);
Jens Axboec1ca7572019-12-25 22:18:28 -07006225 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006226 case IORING_OP_OPENAT2:
Pavel Begunkov014db002020-03-03 21:33:12 +03006227 ret = io_openat2(req, force_nonblock);
Jens Axboecebdb982020-01-08 17:59:24 -07006228 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006229 case IORING_OP_EPOLL_CTL:
Jens Axboe229a7b62020-06-22 10:13:11 -06006230 ret = io_epoll_ctl(req, force_nonblock, cs);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006231 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006232 case IORING_OP_SPLICE:
Pavel Begunkov014db002020-03-03 21:33:12 +03006233 ret = io_splice(req, force_nonblock);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006234 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006235 case IORING_OP_PROVIDE_BUFFERS:
Jens Axboe229a7b62020-06-22 10:13:11 -06006236 ret = io_provide_buffers(req, force_nonblock, cs);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006237 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006238 case IORING_OP_REMOVE_BUFFERS:
Jens Axboe229a7b62020-06-22 10:13:11 -06006239 ret = io_remove_buffers(req, force_nonblock, cs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006240 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006241 case IORING_OP_TEE:
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006242 ret = io_tee(req, force_nonblock);
6243 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006244 case IORING_OP_SHUTDOWN:
6245 ret = io_shutdown(req, force_nonblock);
6246 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006247 case IORING_OP_RENAMEAT:
6248 ret = io_renameat(req, force_nonblock);
6249 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006250 case IORING_OP_UNLINKAT:
6251 ret = io_unlinkat(req, force_nonblock);
6252 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006253 default:
6254 ret = -EINVAL;
6255 break;
Jens Axboe31b51512019-01-18 22:56:34 -07006256 }
6257
6258 if (ret)
Jens Axboeedafcce2019-01-09 09:16:05 -07006259 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006260
Jens Axboeb5325762020-05-19 21:20:27 -06006261 /* If the op doesn't have a file, we're not polling for it */
6262 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006263 const bool in_async = io_wq_current_is_worker();
6264
Jens Axboe11ba8202020-01-15 21:51:17 -07006265 /* workqueue context doesn't hold uring_lock, grab it now */
6266 if (in_async)
6267 mutex_lock(&ctx->uring_lock);
6268
Jens Axboe2b188cc2019-01-07 10:46:33 -07006269 io_iopoll_req_issued(req);
Jens Axboe11ba8202020-01-15 21:51:17 -07006270
6271 if (in_async)
6272 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006273 }
6274
6275 return 0;
6276}
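/*
 * Illustrative userspace counterpart (not kernel code): a minimal sketch,
 * assuming the liburing helpers named below, of how one of the opcodes
 * dispatched above is queued and submitted.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_nop(sqe);			// reaches io_nop() via IORING_OP_NOP
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		// completion posted to the CQ ring
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */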
6277
Pavel Begunkovf4db7182020-06-25 18:20:54 +03006278static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006279{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006280 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006281 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006282 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006283
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006284 timeout = io_prep_linked_timeout(req);
6285 if (timeout)
6286 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006287
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07006288 /* if NO_CANCEL is set, we must still run the work */
6289 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
6290 IO_WQ_WORK_CANCEL) {
Jens Axboe561fb042019-10-24 07:25:42 -06006291 ret = -ECANCELED;
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07006292 }
Jens Axboe31b51512019-01-18 22:56:34 -07006293
Jens Axboe561fb042019-10-24 07:25:42 -06006294 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006295 do {
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006296 ret = io_issue_sqe(req, false, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06006297 /*
6298 * We can get EAGAIN for polled IO even though we're
6299 * forcing a sync submission from here, since we can't
6300 * wait for request slots on the block side.
6301 */
6302 if (ret != -EAGAIN)
6303 break;
6304 cond_resched();
6305 } while (1);
6306 }
Jens Axboe31b51512019-01-18 22:56:34 -07006307
Jens Axboe561fb042019-10-24 07:25:42 -06006308 if (ret) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006309 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006310 io_req_complete(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006311 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006312
Pavel Begunkovf4db7182020-06-25 18:20:54 +03006313 return io_steal_work(req);
Jens Axboe31b51512019-01-18 22:56:34 -07006314}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006315
Jens Axboe65e19f52019-10-26 07:20:21 -06006316static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6317 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06006318{
Jens Axboe65e19f52019-10-26 07:20:21 -06006319 struct fixed_file_table *table;
6320
Jens Axboe05f3fb32019-12-09 11:22:50 -07006321 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
Xiaoming Ni84695082020-05-11 19:25:43 +08006322 return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06006323}
6324
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006325static struct file *io_file_get(struct io_submit_state *state,
6326 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006327{
6328 struct io_ring_ctx *ctx = req->ctx;
6329 struct file *file;
6330
6331 if (fixed) {
Pavel Begunkov479f5172020-10-10 18:34:07 +01006332 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006333 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006334 fd = array_index_nospec(fd, ctx->nr_user_files);
6335 file = io_file_from_index(ctx, fd);
Jens Axboefd2206e2020-06-02 16:40:47 -06006336 if (file) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01006337 req->fixed_file_refs = &ctx->file_data->node->refs;
Jens Axboefd2206e2020-06-02 16:40:47 -06006338 percpu_ref_get(req->fixed_file_refs);
6339 }
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006340 } else {
6341 trace_io_uring_file_get(ctx, fd);
6342 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006343 }
6344
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006345 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006346}
6347
Jens Axboe3529d8c2019-12-19 18:24:38 -07006348static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
Jens Axboe63ff8222020-05-07 14:56:15 -06006349 int fd)
Jens Axboe09bb8392019-03-13 12:39:28 -06006350{
Jens Axboe28cea78a2020-09-14 10:51:17 -06006351 req->file = io_file_get(state, req, fd, req->flags & REQ_F_FIXED_FILE);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006352 if (req->file || io_op_defs[req->opcode].needs_file_no_error)
Jens Axboef86cd202020-01-29 13:46:44 -07006353 return 0;
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006354 return -EBADF;
Pavel Begunkovf56040b2020-07-23 20:25:21 +03006355}
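/*
 * Userspace sketch (liburing helpers assumed, not kernel API) of the
 * registered-file path resolved by io_file_get() above: with
 * IOSQE_FIXED_FILE set, the SQE's fd field is an index into the table
 * registered via IORING_REGISTER_FILES rather than a real descriptor.
 *
 *	int fds[2] = { open("a", O_RDONLY), open("b", O_RDONLY) };
 *
 *	io_uring_register_files(&ring, fds, 2);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, 1, buf, sizeof(buf), 0);	// index 1, not an fd
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	io_uring_submit(&ring);
 */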
6356
Jens Axboe2665abf2019-11-05 12:40:47 -07006357static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6358{
Jens Axboead8a48a2019-11-15 08:49:11 -07006359 struct io_timeout_data *data = container_of(timer,
6360 struct io_timeout_data, timer);
6361 struct io_kiocb *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006362 struct io_ring_ctx *ctx = req->ctx;
6363 struct io_kiocb *prev = NULL;
6364 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006365
6366 spin_lock_irqsave(&ctx->completion_lock, flags);
6367
6368 /*
6369 * We don't expect the list to be empty, that will only happen if we
6370 * race with the completion of the linked work.
6371 */
Pavel Begunkov44932332019-12-05 16:16:35 +03006372 if (!list_empty(&req->link_list)) {
6373 prev = list_entry(req->link_list.prev, struct io_kiocb,
6374 link_list);
Pavel Begunkov900fad42020-10-19 16:39:16 +01006375 if (refcount_inc_not_zero(&prev->refs))
Pavel Begunkov44932332019-12-05 16:16:35 +03006376 list_del_init(&req->link_list);
Pavel Begunkov900fad42020-10-19 16:39:16 +01006377 else
Jens Axboe76a46e02019-11-10 23:34:16 -07006378 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006379 }
6380
6381 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6382
6383 if (prev) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006384 req_set_fail_links(prev);
Pavel Begunkov014db002020-03-03 21:33:12 +03006385 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Jens Axboe76a46e02019-11-10 23:34:16 -07006386 io_put_req(prev);
Jens Axboe47f46762019-11-09 17:43:02 -07006387 } else {
Jens Axboee1e16092020-06-22 09:17:17 -06006388 io_req_complete(req, -ETIME);
Jens Axboe2665abf2019-11-05 12:40:47 -07006389 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006390 return HRTIMER_NORESTART;
6391}
6392
Jens Axboe7271ef32020-08-10 09:55:22 -06006393static void __io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006394{
Jens Axboe76a46e02019-11-10 23:34:16 -07006395 /*
6396 * If the list is now empty, then our linked request finished before
6397	 * we got a chance to set up the timer
6398 */
Pavel Begunkov44932332019-12-05 16:16:35 +03006399 if (!list_empty(&req->link_list)) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006400 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006401
Jens Axboead8a48a2019-11-15 08:49:11 -07006402 data->timer.function = io_link_timeout_fn;
6403 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6404 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006405 }
Jens Axboe7271ef32020-08-10 09:55:22 -06006406}
6407
6408static void io_queue_linked_timeout(struct io_kiocb *req)
6409{
6410 struct io_ring_ctx *ctx = req->ctx;
6411
6412 spin_lock_irq(&ctx->completion_lock);
6413 __io_queue_linked_timeout(req);
Jens Axboe76a46e02019-11-10 23:34:16 -07006414 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006415
Jens Axboe2665abf2019-11-05 12:40:47 -07006416 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006417 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006418}
6419
Jens Axboead8a48a2019-11-15 08:49:11 -07006420static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006421{
6422 struct io_kiocb *nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006423
Pavel Begunkovdea3b492020-04-12 02:05:04 +03006424 if (!(req->flags & REQ_F_LINK_HEAD))
Jens Axboe2665abf2019-11-05 12:40:47 -07006425 return NULL;
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006426 if (req->flags & REQ_F_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006427 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006428
Pavel Begunkov44932332019-12-05 16:16:35 +03006429 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
6430 link_list);
Jens Axboed625c6e2019-12-17 19:53:05 -07006431 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboe76a46e02019-11-10 23:34:16 -07006432 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006433
Pavel Begunkov900fad42020-10-19 16:39:16 +01006434 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006435 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006436 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006437}
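/*
 * Userspace sketch (liburing helpers assumed) of the construct handled by
 * io_prep_linked_timeout() and io_link_timeout_fn() above: an SQE with
 * IOSQE_IO_LINK set, immediately followed by IORING_OP_LINK_TIMEOUT, bounds
 * how long the preceding linked request may run.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *
 *	io_uring_submit(&ring);
 */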
6438
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006439static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006440{
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006441 struct io_kiocb *linked_timeout;
Jens Axboe193155c2020-02-22 23:22:19 -07006442 const struct cred *old_creds = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006443 int ret;
6444
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006445again:
6446 linked_timeout = io_prep_linked_timeout(req);
6447
Pavel Begunkov2e5aa6c2020-10-18 10:17:37 +01006448 if ((req->flags & REQ_F_WORK_INITIALIZED) &&
6449 (req->work.flags & IO_WQ_WORK_CREDS) &&
Jens Axboe98447d62020-10-14 10:48:51 -06006450 req->work.identity->creds != current_cred()) {
Jens Axboe193155c2020-02-22 23:22:19 -07006451 if (old_creds)
6452 revert_creds(old_creds);
Jens Axboe98447d62020-10-14 10:48:51 -06006453 if (old_creds == req->work.identity->creds)
Jens Axboe193155c2020-02-22 23:22:19 -07006454 old_creds = NULL; /* restored original creds */
6455 else
Jens Axboe98447d62020-10-14 10:48:51 -06006456 old_creds = override_creds(req->work.identity->creds);
Jens Axboe193155c2020-02-22 23:22:19 -07006457 }
6458
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006459 ret = io_issue_sqe(req, true, cs);
Jens Axboe491381ce2019-10-17 09:20:46 -06006460
6461 /*
6462 * We async punt it if the file wasn't marked NOWAIT, or if the file
6463 * doesn't support non-blocking read/write attempts
6464 */
Pavel Begunkov24c74672020-06-21 13:09:51 +03006465 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006466 if (!io_arm_poll_handler(req)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006467 /*
6468 * Queued up for async execution, worker will release
6469 * submit reference when the iocb is actually submitted.
6470 */
6471 io_queue_async_work(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006472 }
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03006473
Pavel Begunkovf063c542020-07-25 14:41:59 +03006474 if (linked_timeout)
6475 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006476 } else if (likely(!ret)) {
6477 /* drop submission reference */
6478 req = io_put_req_find_next(req);
6479 if (linked_timeout)
6480 io_queue_linked_timeout(linked_timeout);
Jens Axboee65ef562019-03-12 10:16:44 -06006481
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006482 if (req) {
6483 if (!(req->flags & REQ_F_FORCE_ASYNC))
6484 goto again;
6485 io_queue_async_work(req);
6486 }
6487 } else {
Pavel Begunkov652532a2020-07-03 22:15:07 +03006488 /* un-prep timeout, so it'll be killed as any other linked */
6489 req->flags &= ~REQ_F_LINK_TIMEOUT;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006490 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06006491 io_put_req(req);
Pavel Begunkov652532a2020-07-03 22:15:07 +03006492 io_req_complete(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006493 }
Pavel Begunkov652532a2020-07-03 22:15:07 +03006494
Jens Axboe193155c2020-02-22 23:22:19 -07006495 if (old_creds)
6496 revert_creds(old_creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006497}
6498
Jens Axboef13fad72020-06-22 09:34:30 -06006499static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6500 struct io_comp_state *cs)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006501{
6502 int ret;
6503
Jens Axboe3529d8c2019-12-19 18:24:38 -07006504 ret = io_req_defer(req, sqe);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006505 if (ret) {
6506 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006507fail_req:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006508 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006509 io_put_req(req);
6510 io_req_complete(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006511 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006512 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006513 if (!req->async_data) {
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03006514 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006515 if (unlikely(ret))
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03006516 goto fail_req;
6517 }
Jens Axboece35a472019-12-17 08:04:44 -07006518 io_queue_async_work(req);
6519 } else {
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006520 if (sqe) {
6521 ret = io_req_prep(req, sqe);
6522 if (unlikely(ret))
6523 goto fail_req;
6524 }
6525 __io_queue_sqe(req, cs);
Jens Axboece35a472019-12-17 08:04:44 -07006526 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006527}
6528
Jens Axboef13fad72020-06-22 09:34:30 -06006529static inline void io_queue_link_head(struct io_kiocb *req,
6530 struct io_comp_state *cs)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006531{
Jens Axboe94ae5e72019-11-14 19:39:52 -07006532 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
Jens Axboee1e16092020-06-22 09:17:17 -06006533 io_put_req(req);
6534 io_req_complete(req, -ECANCELED);
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006535 } else
Jens Axboef13fad72020-06-22 09:34:30 -06006536 io_queue_sqe(req, NULL, cs);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006537}
6538
Pavel Begunkov863e0562020-10-27 23:25:35 +00006539struct io_submit_link {
6540 struct io_kiocb *head;
6541 struct io_kiocb *last;
6542};
6543
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006544static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Pavel Begunkov863e0562020-10-27 23:25:35 +00006545 struct io_submit_link *link, struct io_comp_state *cs)
Jens Axboe9e645e112019-05-10 16:07:28 -06006546{
Jackie Liua197f662019-11-08 08:09:12 -07006547 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006548 int ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06006549
Jens Axboe9e645e112019-05-10 16:07:28 -06006550 /*
6551 * If we already have a head request, queue this one for async
6552 * submittal once the head completes. If we don't have a head but
6553 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6554 * submitted sync once the chain is complete. If none of those
6555 * conditions are true (normal request), then just queue it.
6556 */
Pavel Begunkov863e0562020-10-27 23:25:35 +00006557 if (link->head) {
6558 struct io_kiocb *head = link->head;
Jens Axboe9e645e112019-05-10 16:07:28 -06006559
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006560 /*
6561 * Taking sequential execution of a link, draining both sides
6562	 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6563 * requests in the link. So, it drains the head and the
6564 * next after the link request. The last one is done via
6565 * drain_next flag to persist the effect across calls.
6566 */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006567 if (req->flags & REQ_F_IO_DRAIN) {
Pavel Begunkov711be032020-01-17 03:57:59 +03006568 head->flags |= REQ_F_IO_DRAIN;
6569 ctx->drain_next = 1;
6570 }
Jens Axboe3529d8c2019-12-19 18:24:38 -07006571 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006572 if (unlikely(ret)) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006573 /* fail even hard links since we don't submit */
Pavel Begunkov9d763772019-12-17 02:22:07 +03006574 head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006575 return ret;
Jens Axboe2d283902019-12-04 11:08:05 -07006576 }
Pavel Begunkov9d763772019-12-17 02:22:07 +03006577 trace_io_uring_link(ctx, req, head);
6578 list_add_tail(&req->link_list, &head->link_list);
Pavel Begunkov863e0562020-10-27 23:25:35 +00006579 link->last = req;
Jens Axboe9e645e112019-05-10 16:07:28 -06006580
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006581 /* last request of a link, enqueue the link */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006582 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Jens Axboef13fad72020-06-22 09:34:30 -06006583 io_queue_link_head(head, cs);
Pavel Begunkov863e0562020-10-27 23:25:35 +00006584 link->head = NULL;
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006585 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006586 } else {
Pavel Begunkov711be032020-01-17 03:57:59 +03006587 if (unlikely(ctx->drain_next)) {
6588 req->flags |= REQ_F_IO_DRAIN;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006589 ctx->drain_next = 0;
Pavel Begunkov711be032020-01-17 03:57:59 +03006590 }
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006591 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkovdea3b492020-04-12 02:05:04 +03006592 req->flags |= REQ_F_LINK_HEAD;
Pavel Begunkov711be032020-01-17 03:57:59 +03006593 INIT_LIST_HEAD(&req->link_list);
Pavel Begunkovf1d96a82020-03-13 22:29:14 +03006594
Pavel Begunkov711be032020-01-17 03:57:59 +03006595 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006596 if (unlikely(ret))
Pavel Begunkov711be032020-01-17 03:57:59 +03006597 req->flags |= REQ_F_FAIL_LINK;
Pavel Begunkov863e0562020-10-27 23:25:35 +00006598 link->head = req;
6599 link->last = req;
Pavel Begunkov711be032020-01-17 03:57:59 +03006600 } else {
Jens Axboef13fad72020-06-22 09:34:30 -06006601 io_queue_sqe(req, sqe, cs);
Pavel Begunkov711be032020-01-17 03:57:59 +03006602 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006603 }
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006604
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006605 return 0;
Jens Axboe9e645e112019-05-10 16:07:28 -06006606}
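/*
 * Userspace sketch (liburing helpers assumed) of the chain building done by
 * io_submit_sqe() above: IOSQE_IO_LINK strings consecutive SQEs into a chain
 * executed in order, and IOSQE_IO_DRAIN makes a request wait for everything
 * submitted before it.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;		// next SQE runs after this one
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	sqe->flags |= IOSQE_IO_DRAIN;		// also wait for earlier requests
 *
 *	io_uring_submit(&ring);
 */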
6607
Jens Axboe9a56a232019-01-09 09:06:50 -07006608/*
6609 * Batched submission is done; ensure local IO is flushed out.
6610 */
6611static void io_submit_state_end(struct io_submit_state *state)
6612{
Jens Axboef13fad72020-06-22 09:34:30 -06006613 if (!list_empty(&state->comp.list))
6614 io_submit_flush_completions(&state->comp);
Jens Axboe9a56a232019-01-09 09:06:50 -07006615 blk_finish_plug(&state->plug);
Pavel Begunkov9f13c352020-05-17 14:13:41 +03006616 io_state_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07006617 if (state->free_reqs)
Pavel Begunkov6c8a3132020-02-01 03:58:00 +03006618 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
Jens Axboe9a56a232019-01-09 09:06:50 -07006619}
6620
6621/*
6622 * Start submission side cache.
6623 */
6624static void io_submit_state_start(struct io_submit_state *state,
Jens Axboe013538b2020-06-22 09:29:15 -06006625 struct io_ring_ctx *ctx, unsigned int max_ios)
Jens Axboe9a56a232019-01-09 09:06:50 -07006626{
6627 blk_start_plug(&state->plug);
Jens Axboe013538b2020-06-22 09:29:15 -06006628 state->comp.nr = 0;
6629 INIT_LIST_HEAD(&state->comp.list);
6630 state->comp.ctx = ctx;
Jens Axboe2579f912019-01-09 09:10:43 -07006631 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07006632 state->file = NULL;
6633 state->ios_left = max_ios;
6634}
6635
Jens Axboe2b188cc2019-01-07 10:46:33 -07006636static void io_commit_sqring(struct io_ring_ctx *ctx)
6637{
Hristo Venev75b28af2019-08-26 17:23:46 +00006638 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006639
Pavel Begunkovcaf582c2019-12-30 21:24:46 +03006640 /*
6641 * Ensure any loads from the SQEs are done at this point,
6642 * since once we write the new head, the application could
6643 * write new data to them.
6644 */
6645 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006646}
6647
6648/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006649 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
Jens Axboe2b188cc2019-01-07 10:46:33 -07006650 * that is mapped by userspace. This means that care needs to be taken to
6651 * ensure that reads are stable, as we cannot rely on userspace always
6652 * being a good citizen. If members of the sqe are validated and then later
6653 * used, it's important that those reads are done through READ_ONCE() to
6654 * prevent a re-load down the line.
6655 */
Pavel Begunkov709b3022020-04-08 08:58:43 +03006656static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006657{
Hristo Venev75b28af2019-08-26 17:23:46 +00006658 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006659 unsigned head;
6660
6661 /*
6662 * The cached sq head (or cq tail) serves two purposes:
6663 *
6664 * 1) allows us to batch the cost of updating the user visible
6665 * head.
6666 * 2) allows the kernel side to track the head on its own, even
6667 * though the application is the one updating it.
6668 */
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006669 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
Pavel Begunkov709b3022020-04-08 08:58:43 +03006670 if (likely(head < ctx->sq_entries))
6671 return &ctx->sq_sqes[head];
Jens Axboe2b188cc2019-01-07 10:46:33 -07006672
6673 /* drop invalid entries */
Jens Axboe498ccd92019-10-25 10:04:25 -06006674 ctx->cached_sq_dropped++;
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006675 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
Pavel Begunkov709b3022020-04-08 08:58:43 +03006676 return NULL;
6677}
6678
6679static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6680{
6681 ctx->cached_sq_head++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006682}
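/*
 * For reference, a hedged userspace sketch of the producer side that
 * io_get_sqe() pairs with (assumptions: the ring was mmap()ed per
 * io_uring_setup(2), 'sq' holds the usual pointers into that mapping, and
 * fill_sqe() is a hypothetical helper): the application fills the next SQE
 * slot, publishes its index in the sq_array, then advances the tail with a
 * release store so the kernel sees a fully written entry.
 *
 *	unsigned tail = *sq.tail;
 *	unsigned idx = tail & *sq.ring_mask;
 *
 *	fill_sqe(&sq.sqes[idx]);
 *	sq.array[idx] = idx;
 *	__atomic_store_n(sq.tail, tail + 1, __ATOMIC_RELEASE);
 */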
6683
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006684/*
6685 * Check SQE restrictions (opcode and flags).
6686 *
6687 * Returns 'true' if SQE is allowed, 'false' otherwise.
6688 */
6689static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6690 struct io_kiocb *req,
6691 unsigned int sqe_flags)
6692{
6693 if (!ctx->restricted)
6694 return true;
6695
6696 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6697 return false;
6698
6699 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6700 ctx->restrictions.sqe_flags_required)
6701 return false;
6702
6703 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6704 ctx->restrictions.sqe_flags_required))
6705 return false;
6706
6707 return true;
6708}
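/*
 * Userspace sketch of the policy io_check_restriction() enforces
 * (assumptions: liburing's io_uring_register_restrictions() and
 * io_uring_enable_rings() helpers): a ring created with
 * IORING_SETUP_R_DISABLED can be limited to a set of opcodes and SQE flags
 * before it is enabled.
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
 *		  .sqe_flags = IOSQE_IO_LINK },
 *	};
 *
 *	io_uring_register_restrictions(&ring, res, 2);
 *	io_uring_enable_rings(&ring);
 */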
6709
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006710#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6711 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6712 IOSQE_BUFFER_SELECT)
6713
6714static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6715 const struct io_uring_sqe *sqe,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03006716 struct io_submit_state *state)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006717{
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006718 unsigned int sqe_flags;
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006719 int id, ret;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006720
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006721 req->opcode = READ_ONCE(sqe->opcode);
6722 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006723 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006724 req->file = NULL;
6725 req->ctx = ctx;
6726 req->flags = 0;
6727 /* one is dropped after submission, the other at completion */
6728 refcount_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006729 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006730 req->result = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006731
6732 if (unlikely(req->opcode >= IORING_OP_LAST))
6733 return -EINVAL;
6734
Jens Axboe28cea78a2020-09-14 10:51:17 -06006735 if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
Jens Axboe9d8426a2020-06-16 18:42:49 -06006736 return -EFAULT;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006737
6738 sqe_flags = READ_ONCE(sqe->flags);
6739 /* enforce forwards compatibility on users */
6740 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6741 return -EINVAL;
6742
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006743 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6744 return -EACCES;
6745
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006746 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6747 !io_op_defs[req->opcode].buffer_select)
6748 return -EOPNOTSUPP;
6749
6750 id = READ_ONCE(sqe->personality);
6751 if (id) {
Jens Axboe1e6fa522020-10-15 08:46:24 -06006752 struct io_identity *iod;
6753
Jens Axboe1e6fa522020-10-15 08:46:24 -06006754 iod = idr_find(&ctx->personality_idr, id);
6755 if (unlikely(!iod))
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006756 return -EINVAL;
Jens Axboe1e6fa522020-10-15 08:46:24 -06006757 refcount_inc(&iod->count);
Pavel Begunkovec99ca62020-10-18 10:17:38 +01006758
6759 __io_req_init_async(req);
Jens Axboe1e6fa522020-10-15 08:46:24 -06006760 get_cred(iod->creds);
6761 req->work.identity = iod;
Jens Axboedfead8a2020-10-14 10:12:37 -06006762 req->work.flags |= IO_WQ_WORK_CREDS;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006763 }
6764
6765 /* same numerical values with corresponding REQ_F_*, safe to copy */
Pavel Begunkovc11368a52020-05-17 14:13:42 +03006766 req->flags |= sqe_flags;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006767
Jens Axboe63ff8222020-05-07 14:56:15 -06006768 if (!io_op_defs[req->opcode].needs_file)
6769 return 0;
6770
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006771 ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
6772 state->ios_left--;
6773 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006774}
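/*
 * Userspace sketch (liburing helper assumed) of the personality lookup done
 * in io_init_req() above: io_uring_register_personality() snapshots the
 * registering task's credentials and returns an id; placing that id in
 * sqe->personality makes this one request run with those credentials.
 *
 *	int id = io_uring_register_personality(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_openat(sqe, AT_FDCWD, "/etc/hostname", O_RDONLY, 0);
 *	sqe->personality = id;
 *	io_uring_submit(&ring);
 */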
6775
Jens Axboe0f212202020-09-13 13:09:39 -06006776static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006777{
Jens Axboeac8691c2020-06-01 08:30:41 -06006778 struct io_submit_state state;
Pavel Begunkov863e0562020-10-27 23:25:35 +00006779 struct io_submit_link link;
Jens Axboe9e645e112019-05-10 16:07:28 -06006780 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006781
Jens Axboec4a2ed72019-11-21 21:01:26 -07006782 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006783 if (test_bit(0, &ctx->sq_check_overflow)) {
6784 if (!list_empty(&ctx->cq_overflow_list) &&
Jens Axboee6c8aa92020-09-28 13:10:13 -06006785 !io_cqring_overflow_flush(ctx, false, NULL, NULL))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006786 return -EBUSY;
6787 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006788
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006789 /* make sure SQ entry isn't read before tail */
6790 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006791
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006792 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6793 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006794
Jens Axboed8a6df12020-10-15 16:24:45 -06006795 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006796 refcount_add(nr, &current->usage);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006797
Jens Axboe6c271ce2019-01-10 11:22:30 -07006798 io_submit_state_start(&state, ctx, nr);
Pavel Begunkov863e0562020-10-27 23:25:35 +00006799 link.head = NULL;
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006800
Jens Axboe6c271ce2019-01-10 11:22:30 -07006801 for (i = 0; i < nr; i++) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006802 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006803 struct io_kiocb *req;
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03006804 int err;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006805
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03006806 sqe = io_get_sqe(ctx);
6807 if (unlikely(!sqe)) {
6808 io_consume_sqe(ctx);
6809 break;
6810 }
Jens Axboeac8691c2020-06-01 08:30:41 -06006811 req = io_alloc_req(ctx, &state);
Pavel Begunkov196be952019-11-07 01:41:06 +03006812 if (unlikely(!req)) {
6813 if (!submitted)
6814 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006815 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006816 }
Pavel Begunkov709b3022020-04-08 08:58:43 +03006817 io_consume_sqe(ctx);
Jens Axboed3656342019-12-18 09:50:26 -07006818 /* will complete beyond this point, count as submitted */
6819 submitted++;
6820
Pavel Begunkov692d8362020-10-10 18:34:13 +01006821 err = io_init_req(ctx, req, sqe, &state);
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006822 if (unlikely(err)) {
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03006823fail_req:
Jens Axboee1e16092020-06-22 09:17:17 -06006824 io_put_req(req);
6825 io_req_complete(req, err);
Jens Axboed3656342019-12-18 09:50:26 -07006826 break;
6827 }
6828
Jens Axboe354420f2020-01-08 18:55:15 -07006829 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03006830 true, io_async_submit(ctx));
Jens Axboef13fad72020-06-22 09:34:30 -06006831 err = io_submit_sqe(req, sqe, &link, &state.comp);
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006832 if (err)
6833 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006834 }
6835
Pavel Begunkov9466f432020-01-25 22:34:01 +03006836 if (unlikely(submitted != nr)) {
6837 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006838 struct io_uring_task *tctx = current->io_uring;
6839 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006840
Jens Axboed8a6df12020-10-15 16:24:45 -06006841 percpu_ref_put_many(&ctx->refs, unused);
6842 percpu_counter_sub(&tctx->inflight, unused);
6843 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006844 }
Pavel Begunkov863e0562020-10-27 23:25:35 +00006845 if (link.head)
6846 io_queue_link_head(link.head, &state.comp);
Jens Axboeac8691c2020-06-01 08:30:41 -06006847 io_submit_state_end(&state);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006848
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006849 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6850 io_commit_sqring(ctx);
6851
Jens Axboe6c271ce2019-01-10 11:22:30 -07006852 return submitted;
6853}
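/*
 * io_submit_sqes() runs on behalf of io_uring_enter(2) (or the SQPOLL
 * thread below).  A hedged sketch of the raw syscall that drives it, without
 * liburing (ring_fd and four queued SQEs are assumed to exist already):
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, 4, 0, 0, NULL, 0);
 *	// on success, ret is the number of SQEs consumed ('submitted' above)
 */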
6854
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006855static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6856{
6857 /* Tell userspace we may need a wakeup call */
6858 spin_lock_irq(&ctx->completion_lock);
6859 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6860 spin_unlock_irq(&ctx->completion_lock);
6861}
6862
6863static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6864{
6865 spin_lock_irq(&ctx->completion_lock);
6866 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6867 spin_unlock_irq(&ctx->completion_lock);
6868}
6869
Jens Axboe3f0e64d2020-09-02 12:42:47 -06006870static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
6871 int sync, void *key)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006872{
Jens Axboe3f0e64d2020-09-02 12:42:47 -06006873 struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
6874 int ret;
6875
6876 ret = autoremove_wake_function(wqe, mode, sync, key);
6877 if (ret) {
6878 unsigned long flags;
6879
6880 spin_lock_irqsave(&ctx->completion_lock, flags);
6881 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6882 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6883 }
6884 return ret;
6885}
6886
Jens Axboec8d1ba52020-09-14 11:07:26 -06006887enum sq_ret {
6888 SQT_IDLE = 1,
6889 SQT_SPIN = 2,
6890 SQT_DID_WORK = 4,
6891};
6892
6893static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
Jens Axboee95eee22020-09-08 09:11:32 -06006894 unsigned long start_jiffies, bool cap_entries)
Jens Axboec8d1ba52020-09-14 11:07:26 -06006895{
6896 unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
Jens Axboe534ca6d2020-09-02 13:52:19 -06006897 struct io_sq_data *sqd = ctx->sq_data;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006898 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006899 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006900
Jens Axboec8d1ba52020-09-14 11:07:26 -06006901again:
6902 if (!list_empty(&ctx->iopoll_list)) {
6903 unsigned nr_events = 0;
Jackie Liua4c0b3d2019-07-08 13:41:12 +08006904
Jens Axboec8d1ba52020-09-14 11:07:26 -06006905 mutex_lock(&ctx->uring_lock);
6906 if (!list_empty(&ctx->iopoll_list) && !need_resched())
6907 io_do_iopoll(ctx, &nr_events, 0);
6908 mutex_unlock(&ctx->uring_lock);
6909 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006910
Jens Axboec8d1ba52020-09-14 11:07:26 -06006911 to_submit = io_sqring_entries(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006912
Jens Axboec8d1ba52020-09-14 11:07:26 -06006913 /*
6914 * If submit got -EBUSY, flag us as needing the application
6915 * to enter the kernel to reap and flush events.
6916 */
6917 if (!to_submit || ret == -EBUSY || need_resched()) {
6918 /*
6919 * Drop cur_mm before scheduling; we can't hold it for
6920 * long periods (or over schedule()). Do this before
6921 * adding ourselves to the waitqueue, as the unuse/drop
6922 * may sleep.
6923 */
Jens Axboe28cea78a2020-09-14 10:51:17 -06006924 io_sq_thread_drop_mm_files();
Jens Axboe6c271ce2019-01-10 11:22:30 -07006925
Jens Axboec8d1ba52020-09-14 11:07:26 -06006926 /*
6927 * We're polling. If we're within the defined idle
6928 * period, then let us spin without work before going
6929 * to sleep. The exception is if we got EBUSY doing
6930 * more IO; in that case we should wait for the application to
6931 * reap events and wake us up.
6932 */
6933 if (!list_empty(&ctx->iopoll_list) || need_resched() ||
6934 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6935 !percpu_ref_is_dying(&ctx->refs)))
6936 return SQT_SPIN;
6937
Jens Axboe534ca6d2020-09-02 13:52:19 -06006938 prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
Jens Axboec8d1ba52020-09-14 11:07:26 -06006939 TASK_INTERRUPTIBLE);
6940
6941 /*
6942 * While doing polled IO, before going to sleep, we need
6943 * to check if there are new reqs added to iopoll_list;
6944 * reqs may have been punted to an io worker and will
6945 * be added to iopoll_list later, hence check
6946 * the iopoll_list again.
6947 */
6948 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6949 !list_empty_careful(&ctx->iopoll_list)) {
Jens Axboe534ca6d2020-09-02 13:52:19 -06006950 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
Jens Axboec8d1ba52020-09-14 11:07:26 -06006951 goto again;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006952 }
6953
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006954 to_submit = io_sqring_entries(ctx);
Jens Axboec8d1ba52020-09-14 11:07:26 -06006955 if (!to_submit || ret == -EBUSY)
6956 return SQT_IDLE;
6957 }
6958
Jens Axboe534ca6d2020-09-02 13:52:19 -06006959 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
Jens Axboec8d1ba52020-09-14 11:07:26 -06006960 io_ring_clear_wakeup_flag(ctx);
6961
Jens Axboee95eee22020-09-08 09:11:32 -06006962 /* if we're handling multiple rings, cap submit size for fairness */
6963 if (cap_entries && to_submit > 8)
6964 to_submit = 8;
6965
Jens Axboec8d1ba52020-09-14 11:07:26 -06006966 mutex_lock(&ctx->uring_lock);
6967 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6968 ret = io_submit_sqes(ctx, to_submit);
6969 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06006970
6971 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6972 wake_up(&ctx->sqo_sq_wait);
6973
Jens Axboec8d1ba52020-09-14 11:07:26 -06006974 return SQT_DID_WORK;
6975}
6976
Jens Axboe69fb2132020-09-14 11:16:23 -06006977static void io_sqd_init_new(struct io_sq_data *sqd)
6978{
6979 struct io_ring_ctx *ctx;
6980
6981 while (!list_empty(&sqd->ctx_new_list)) {
6982 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
6983 init_wait(&ctx->sqo_wait_entry);
6984 ctx->sqo_wait_entry.func = io_sq_wake_function;
6985 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6986 complete(&ctx->sq_thread_comp);
6987 }
6988}
6989
Jens Axboe6c271ce2019-01-10 11:22:30 -07006990static int io_sq_thread(void *data)
6991{
Dennis Zhou91d8f512020-09-16 13:41:05 -07006992 struct cgroup_subsys_state *cur_css = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006993 struct files_struct *old_files = current->files;
6994 struct nsproxy *old_nsproxy = current->nsproxy;
Jens Axboe69fb2132020-09-14 11:16:23 -06006995 const struct cred *old_cred = NULL;
6996 struct io_sq_data *sqd = data;
6997 struct io_ring_ctx *ctx;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006998 unsigned long start_jiffies;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006999
Jens Axboe28cea78a2020-09-14 10:51:17 -06007000 task_lock(current);
7001 current->files = NULL;
7002 current->nsproxy = NULL;
7003 task_unlock(current);
7004
Jens Axboec8d1ba52020-09-14 11:07:26 -06007005 start_jiffies = jiffies;
Jens Axboe69fb2132020-09-14 11:16:23 -06007006 while (!kthread_should_stop()) {
7007 enum sq_ret ret = 0;
Jens Axboee95eee22020-09-08 09:11:32 -06007008 bool cap_entries;
Jens Axboec1edbf52019-11-10 16:56:04 -07007009
7010 /*
Jens Axboe69fb2132020-09-14 11:16:23 -06007011 * Any changes to the sqd lists are synchronized through the
7012 * kthread parking. This synchronizes the thread vs users;
7013 * the users are synchronized on the sqd->ctx_lock.
Jens Axboec1edbf52019-11-10 16:56:04 -07007014 */
Jens Axboe69fb2132020-09-14 11:16:23 -06007015 if (kthread_should_park())
7016 kthread_parkme();
7017
7018 if (unlikely(!list_empty(&sqd->ctx_new_list)))
7019 io_sqd_init_new(sqd);
7020
Jens Axboee95eee22020-09-08 09:11:32 -06007021 cap_entries = !list_is_singular(&sqd->ctx_list);
7022
Jens Axboe69fb2132020-09-14 11:16:23 -06007023 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7024 if (current->cred != ctx->creds) {
7025 if (old_cred)
7026 revert_creds(old_cred);
7027 old_cred = override_creds(ctx->creds);
7028 }
Dennis Zhou91d8f512020-09-16 13:41:05 -07007029 io_sq_thread_associate_blkcg(ctx, &cur_css);
Jens Axboe4ea33a92020-10-15 13:46:44 -06007030#ifdef CONFIG_AUDIT
7031 current->loginuid = ctx->loginuid;
7032 current->sessionid = ctx->sessionid;
7033#endif
Jens Axboe69fb2132020-09-14 11:16:23 -06007034
Jens Axboee95eee22020-09-08 09:11:32 -06007035 ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
Jens Axboe69fb2132020-09-14 11:16:23 -06007036
Jens Axboe28cea78a2020-09-14 10:51:17 -06007037 io_sq_thread_drop_mm_files();
Jens Axboe6c271ce2019-01-10 11:22:30 -07007038 }
7039
Jens Axboe69fb2132020-09-14 11:16:23 -06007040 if (ret & SQT_SPIN) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06007041 io_run_task_work();
7042 cond_resched();
Jens Axboe69fb2132020-09-14 11:16:23 -06007043 } else if (ret == SQT_IDLE) {
7044 if (kthread_should_park())
7045 continue;
7046 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7047 io_ring_set_wakeup_flag(ctx);
7048 schedule();
7049 start_jiffies = jiffies;
7050 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7051 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007052 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007053 }
7054
Jens Axboe4c6e2772020-07-01 11:29:10 -06007055 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07007056
Dennis Zhou91d8f512020-09-16 13:41:05 -07007057 if (cur_css)
7058 io_sq_thread_unassociate_blkcg();
Jens Axboe69fb2132020-09-14 11:16:23 -06007059 if (old_cred)
7060 revert_creds(old_cred);
Jens Axboe06058632019-04-13 09:26:03 -06007061
Jens Axboe28cea78a2020-09-14 10:51:17 -06007062 task_lock(current);
7063 current->files = old_files;
7064 current->nsproxy = old_nsproxy;
7065 task_unlock(current);
7066
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02007067 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06007068
Jens Axboe6c271ce2019-01-10 11:22:30 -07007069 return 0;
7070}
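/*
 * Userspace sketch (liburing helper assumed) of how the SQPOLL thread above
 * is requested: IORING_SETUP_SQPOLL creates the thread and sq_thread_idle
 * sets the idle time in milliseconds after which io_sq_thread() sets
 * IORING_SQ_NEED_WAKEUP and sleeps until woken, e.g. via io_uring_enter(2)
 * with IORING_ENTER_SQ_WAKEUP.
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_SQPOLL,
 *		.sq_thread_idle	= 2000,		// 2 seconds of idle before sleeping
 *	};
 *
 *	io_uring_queue_init_params(8, &ring, &p);
 */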
7071
Jens Axboebda52162019-09-24 13:47:15 -06007072struct io_wait_queue {
7073 struct wait_queue_entry wq;
7074 struct io_ring_ctx *ctx;
7075 unsigned to_wait;
7076 unsigned nr_timeouts;
7077};
7078
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07007079static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
Jens Axboebda52162019-09-24 13:47:15 -06007080{
7081 struct io_ring_ctx *ctx = iowq->ctx;
7082
7083 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007084 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007085 * started waiting. For timeouts, we always want to return to userspace,
7086 * regardless of event count.
7087 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07007088 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06007089 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
7090}
7091
7092static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7093 int wake_flags, void *key)
7094{
7095 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7096 wq);
7097
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07007098 /* use noflush == true, as we can't safely rely on locking context */
7099 if (!io_should_wake(iowq, true))
Jens Axboebda52162019-09-24 13:47:15 -06007100 return -1;
7101
7102 return autoremove_wake_function(curr, mode, wake_flags, key);
7103}
7104
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007105static int io_run_task_work_sig(void)
7106{
7107 if (io_run_task_work())
7108 return 1;
7109 if (!signal_pending(current))
7110 return 0;
7111 if (current->jobctl & JOBCTL_TASK_WORK) {
7112 spin_lock_irq(&current->sighand->siglock);
7113 current->jobctl &= ~JOBCTL_TASK_WORK;
7114 recalc_sigpending();
7115 spin_unlock_irq(&current->sighand->siglock);
7116 return 1;
7117 }
7118 return -EINTR;
7119}
7120
Jens Axboe2b188cc2019-01-07 10:46:33 -07007121/*
7122 * Wait until events become available, if we don't already have some. The
7123 * application must reap them itself, as they reside on the shared cq ring.
7124 */
7125static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
7126 const sigset_t __user *sig, size_t sigsz)
7127{
Jens Axboebda52162019-09-24 13:47:15 -06007128 struct io_wait_queue iowq = {
7129 .wq = {
7130 .private = current,
7131 .func = io_wake_function,
7132 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7133 },
7134 .ctx = ctx,
7135 .to_wait = min_events,
7136 };
Hristo Venev75b28af2019-08-26 17:23:46 +00007137 struct io_rings *rings = ctx->rings;
Jackie Liue9ffa5c2019-10-29 11:16:42 +08007138 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007139
Jens Axboeb41e9852020-02-17 09:52:41 -07007140 do {
7141 if (io_cqring_events(ctx, false) >= min_events)
7142 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007143 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007144 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007145 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007146
7147 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007148#ifdef CONFIG_COMPAT
7149 if (in_compat_syscall())
7150 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007151 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007152 else
7153#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007154 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007155
Jens Axboe2b188cc2019-01-07 10:46:33 -07007156 if (ret)
7157 return ret;
7158 }
7159
Jens Axboebda52162019-09-24 13:47:15 -06007160 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007161 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007162 do {
7163 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7164 TASK_INTERRUPTIBLE);
Jens Axboece593a62020-06-30 12:39:05 -06007165 /* make sure we run task_work before checking for signals */
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007166 ret = io_run_task_work_sig();
7167 if (ret > 0)
Jens Axboe4c6e2772020-07-01 11:29:10 -06007168 continue;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007169 else if (ret < 0)
Jens Axboece593a62020-06-30 12:39:05 -06007170 break;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07007171 if (io_should_wake(&iowq, false))
Jens Axboebda52162019-09-24 13:47:15 -06007172 break;
7173 schedule();
Jens Axboebda52162019-09-24 13:47:15 -06007174 } while (1);
7175 finish_wait(&ctx->wait, &iowq.wq);
7176
Jens Axboeb7db41c2020-07-04 08:55:50 -06007177 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007178
Hristo Venev75b28af2019-08-26 17:23:46 +00007179 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007180}
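/*
 * Userspace sketch (liburing helpers assumed) of what ends up in
 * io_cqring_wait() above: waiting for a minimum number of completions maps
 * to io_uring_enter(2) with IORING_ENTER_GETEVENTS and min_complete set.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_submit_and_wait(&ring, 1);	// wait for at least one CQE
 *	io_uring_peek_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 */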
7181
Jens Axboe6b063142019-01-10 22:13:58 -07007182static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7183{
7184#if defined(CONFIG_UNIX)
7185 if (ctx->ring_sock) {
7186 struct sock *sock = ctx->ring_sock->sk;
7187 struct sk_buff *skb;
7188
7189 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7190 kfree_skb(skb);
7191 }
7192#else
7193 int i;
7194
Jens Axboe65e19f52019-10-26 07:20:21 -06007195 for (i = 0; i < ctx->nr_user_files; i++) {
7196 struct file *file;
7197
7198 file = io_file_from_index(ctx, i);
7199 if (file)
7200 fput(file);
7201 }
Jens Axboe6b063142019-01-10 22:13:58 -07007202#endif
7203}
7204
Jens Axboe05f3fb32019-12-09 11:22:50 -07007205static void io_file_ref_kill(struct percpu_ref *ref)
7206{
7207 struct fixed_file_data *data;
7208
7209 data = container_of(ref, struct fixed_file_data, refs);
7210 complete(&data->done);
7211}
7212
Jens Axboe6b063142019-01-10 22:13:58 -07007213static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7214{
Jens Axboe05f3fb32019-12-09 11:22:50 -07007215 struct fixed_file_data *data = ctx->file_data;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007216 struct fixed_file_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06007217 unsigned nr_tables, i;
7218
Jens Axboe05f3fb32019-12-09 11:22:50 -07007219 if (!data)
Jens Axboe6b063142019-01-10 22:13:58 -07007220 return -ENXIO;
7221
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007222 spin_lock(&data->lock);
Pavel Begunkov1e5d7702020-11-18 14:56:25 +00007223 ref_node = data->node;
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007224 spin_unlock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007225 if (ref_node)
7226 percpu_ref_kill(&ref_node->refs);
7227
7228 percpu_ref_kill(&data->refs);
7229
7230 /* wait for all refs nodes to complete */
Jens Axboe4a38aed22020-05-14 17:21:15 -06007231 flush_delayed_work(&ctx->file_put_work);
Jens Axboe2faf8522020-02-04 19:54:55 -07007232 wait_for_completion(&data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007233
Jens Axboe6b063142019-01-10 22:13:58 -07007234 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06007235 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7236 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007237 kfree(data->table[i].files);
7238 kfree(data->table);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007239 percpu_ref_exit(&data->refs);
7240 kfree(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007241 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007242 ctx->nr_user_files = 0;
7243 return 0;
7244}
7245
Jens Axboe534ca6d2020-09-02 13:52:19 -06007246static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007247{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007248 if (refcount_dec_and_test(&sqd->refs)) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02007249 /*
7250 * The park is a bit of a work-around; without it we get
7251 * warning spews on shutdown with SQPOLL set and affinity
7252 * set to a single CPU.
7253 */
Jens Axboe534ca6d2020-09-02 13:52:19 -06007254 if (sqd->thread) {
7255 kthread_park(sqd->thread);
7256 kthread_stop(sqd->thread);
7257 }
7258
7259 kfree(sqd);
7260 }
7261}
7262
Jens Axboeaa061652020-09-02 14:50:27 -06007263static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7264{
7265 struct io_ring_ctx *ctx_attach;
7266 struct io_sq_data *sqd;
7267 struct fd f;
7268
7269 f = fdget(p->wq_fd);
7270 if (!f.file)
7271 return ERR_PTR(-ENXIO);
7272 if (f.file->f_op != &io_uring_fops) {
7273 fdput(f);
7274 return ERR_PTR(-EINVAL);
7275 }
7276
7277 ctx_attach = f.file->private_data;
7278 sqd = ctx_attach->sq_data;
7279 if (!sqd) {
7280 fdput(f);
7281 return ERR_PTR(-EINVAL);
7282 }
7283
7284 refcount_inc(&sqd->refs);
7285 fdput(f);
7286 return sqd;
7287}
7288
Jens Axboe534ca6d2020-09-02 13:52:19 -06007289static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7290{
7291 struct io_sq_data *sqd;
7292
Jens Axboeaa061652020-09-02 14:50:27 -06007293 if (p->flags & IORING_SETUP_ATTACH_WQ)
7294 return io_attach_sq_data(p);
7295
Jens Axboe534ca6d2020-09-02 13:52:19 -06007296 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7297 if (!sqd)
7298 return ERR_PTR(-ENOMEM);
7299
7300 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007301 INIT_LIST_HEAD(&sqd->ctx_list);
7302 INIT_LIST_HEAD(&sqd->ctx_new_list);
7303 mutex_init(&sqd->ctx_lock);
7304 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007305 init_waitqueue_head(&sqd->wait);
7306 return sqd;
7307}
7308
Jens Axboe69fb2132020-09-14 11:16:23 -06007309static void io_sq_thread_unpark(struct io_sq_data *sqd)
7310 __releases(&sqd->lock)
7311{
7312 if (!sqd->thread)
7313 return;
7314 kthread_unpark(sqd->thread);
7315 mutex_unlock(&sqd->lock);
7316}
7317
7318static void io_sq_thread_park(struct io_sq_data *sqd)
7319 __acquires(&sqd->lock)
7320{
7321 if (!sqd->thread)
7322 return;
7323 mutex_lock(&sqd->lock);
7324 kthread_park(sqd->thread);
7325}
7326
Jens Axboe534ca6d2020-09-02 13:52:19 -06007327static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7328{
7329 struct io_sq_data *sqd = ctx->sq_data;
7330
7331 if (sqd) {
7332 if (sqd->thread) {
7333 /*
7334 * We may arrive here from the error branch in
7335 * io_sq_offload_create() where the kthread is created
7336 * without being woken up, so wake it up now to make
7337 * sure the wait will complete.
7338 */
7339 wake_up_process(sqd->thread);
7340 wait_for_completion(&ctx->sq_thread_comp);
Jens Axboe69fb2132020-09-14 11:16:23 -06007341
7342 io_sq_thread_park(sqd);
7343 }
7344
7345 mutex_lock(&sqd->ctx_lock);
7346 list_del(&ctx->sqd_list);
7347 mutex_unlock(&sqd->ctx_lock);
7348
7349 if (sqd->thread) {
7350 finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
7351 io_sq_thread_unpark(sqd);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007352 }
7353
7354 io_put_sq_data(sqd);
7355 ctx->sq_data = NULL;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007356 }
7357}
7358
Jens Axboe6b063142019-01-10 22:13:58 -07007359static void io_finish_async(struct io_ring_ctx *ctx)
7360{
Jens Axboe6c271ce2019-01-10 11:22:30 -07007361 io_sq_thread_stop(ctx);
7362
Jens Axboe561fb042019-10-24 07:25:42 -06007363 if (ctx->io_wq) {
7364 io_wq_destroy(ctx->io_wq);
7365 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007366 }
7367}
7368
7369#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007370/*
7371 * Ensure the UNIX gc is aware of our file set, so we are certain that
7372 * the io_uring can be safely unregistered on process exit, even if we have
7373 * loops in the file referencing.
7374 */
7375static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7376{
7377 struct sock *sk = ctx->ring_sock->sk;
7378 struct scm_fp_list *fpl;
7379 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007380 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007381
Jens Axboe6b063142019-01-10 22:13:58 -07007382 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7383 if (!fpl)
7384 return -ENOMEM;
7385
7386 skb = alloc_skb(0, GFP_KERNEL);
7387 if (!skb) {
7388 kfree(fpl);
7389 return -ENOMEM;
7390 }
7391
7392 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007393
Jens Axboe08a45172019-10-03 08:11:03 -06007394 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07007395 fpl->user = get_uid(ctx->user);
7396 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007397 struct file *file = io_file_from_index(ctx, i + offset);
7398
7399 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007400 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007401 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007402 unix_inflight(fpl->user, fpl->fp[nr_files]);
7403 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007404 }
7405
Jens Axboe08a45172019-10-03 08:11:03 -06007406 if (nr_files) {
7407 fpl->max = SCM_MAX_FD;
7408 fpl->count = nr_files;
7409 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007410 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007411 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7412 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007413
Jens Axboe08a45172019-10-03 08:11:03 -06007414 for (i = 0; i < nr_files; i++)
7415 fput(fpl->fp[i]);
7416 } else {
7417 kfree_skb(skb);
7418 kfree(fpl);
7419 }
Jens Axboe6b063142019-01-10 22:13:58 -07007420
7421 return 0;
7422}
7423
7424/*
7425 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7426 * causes regular reference counting to break down. We rely on the UNIX
7427 * garbage collection to take care of this problem for us.
7428 */
7429static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7430{
7431 unsigned left, total;
7432 int ret = 0;
7433
7434 total = 0;
7435 left = ctx->nr_user_files;
7436 while (left) {
7437 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007438
7439 ret = __io_sqe_files_scm(ctx, this_files, total);
7440 if (ret)
7441 break;
7442 left -= this_files;
7443 total += this_files;
7444 }
7445
7446 if (!ret)
7447 return 0;
7448
7449 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007450 struct file *file = io_file_from_index(ctx, total);
7451
7452 if (file)
7453 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007454 total++;
7455 }
7456
7457 return ret;
7458}
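/*
 * Sizing note (illustrative, based on SCM_MAX_FD == 253 at the time of
 * writing): each skb carries at most SCM_MAX_FD files, so registering e.g.
 * 1000 files produces DIV_ROUND_UP(1000, 253) == 4 SCM_RIGHTS skbs queued
 * on the ring socket by the loop above.
 */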
7459#else
7460static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7461{
7462 return 0;
7463}
7464#endif
7465
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007466static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
7467 unsigned nr_tables, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007468{
7469 int i;
7470
7471 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007472 struct fixed_file_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007473 unsigned this_files;
7474
7475 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7476 table->files = kcalloc(this_files, sizeof(struct file *),
7477 GFP_KERNEL);
7478 if (!table->files)
7479 break;
7480 nr_files -= this_files;
7481 }
7482
7483 if (i == nr_tables)
7484 return 0;
7485
7486 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007487 struct fixed_file_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007488 kfree(table->files);
7489 }
7490 return 1;
7491}
7492
Jens Axboe05f3fb32019-12-09 11:22:50 -07007493static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
Jens Axboec3a31e62019-10-03 13:59:56 -06007494{
7495#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007496 struct sock *sock = ctx->ring_sock->sk;
7497 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7498 struct sk_buff *skb;
7499 int i;
7500
7501 __skb_queue_head_init(&list);
7502
7503 /*
7504 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7505 * remove this entry and rearrange the file array.
7506 */
7507 skb = skb_dequeue(head);
7508 while (skb) {
7509 struct scm_fp_list *fp;
7510
7511 fp = UNIXCB(skb).fp;
7512 for (i = 0; i < fp->count; i++) {
7513 int left;
7514
7515 if (fp->fp[i] != file)
7516 continue;
7517
7518 unix_notinflight(fp->user, fp->fp[i]);
7519 left = fp->count - 1 - i;
7520 if (left) {
7521 memmove(&fp->fp[i], &fp->fp[i + 1],
7522 left * sizeof(struct file *));
7523 }
7524 fp->count--;
7525 if (!fp->count) {
7526 kfree_skb(skb);
7527 skb = NULL;
7528 } else {
7529 __skb_queue_tail(&list, skb);
7530 }
7531 fput(file);
7532 file = NULL;
7533 break;
7534 }
7535
7536 if (!file)
7537 break;
7538
7539 __skb_queue_tail(&list, skb);
7540
7541 skb = skb_dequeue(head);
7542 }
7543
7544 if (skb_peek(&list)) {
7545 spin_lock_irq(&head->lock);
7546 while ((skb = __skb_dequeue(&list)) != NULL)
7547 __skb_queue_tail(head, skb);
7548 spin_unlock_irq(&head->lock);
7549 }
7550#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007551 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007552#endif
7553}
7554
Jens Axboe05f3fb32019-12-09 11:22:50 -07007555struct io_file_put {
Xiaoguang Wang05589552020-03-31 14:05:18 +08007556 struct list_head list;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007557 struct file *file;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007558};
7559
Jens Axboe4a38aed22020-05-14 17:21:15 -06007560static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007561{
Jens Axboe4a38aed22020-05-14 17:21:15 -06007562 struct fixed_file_data *file_data = ref_node->file_data;
7563 struct io_ring_ctx *ctx = file_data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007564 struct io_file_put *pfile, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007565
7566 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007567 list_del(&pfile->list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007568 io_ring_file_put(ctx, pfile->file);
7569 kfree(pfile);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007570 }
7571
Xiaoguang Wang05589552020-03-31 14:05:18 +08007572 percpu_ref_exit(&ref_node->refs);
7573 kfree(ref_node);
7574 percpu_ref_put(&file_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007575}
7576
Jens Axboe4a38aed22020-05-14 17:21:15 -06007577static void io_file_put_work(struct work_struct *work)
7578{
7579 struct io_ring_ctx *ctx;
7580 struct llist_node *node;
7581
7582 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
7583 node = llist_del_all(&ctx->file_put_llist);
7584
7585 while (node) {
7586 struct fixed_file_ref_node *ref_node;
7587 struct llist_node *next = node->next;
7588
7589 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
7590 __io_file_put_work(ref_node);
7591 node = next;
7592 }
7593}
7594
Jens Axboe05f3fb32019-12-09 11:22:50 -07007595static void io_file_data_ref_zero(struct percpu_ref *ref)
7596{
Xiaoguang Wang05589552020-03-31 14:05:18 +08007597 struct fixed_file_ref_node *ref_node;
Pavel Begunkove2978222020-11-18 14:56:26 +00007598 struct fixed_file_data *data;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007599 struct io_ring_ctx *ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007600 bool first_add = false;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007601 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007602
Xiaoguang Wang05589552020-03-31 14:05:18 +08007603 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
Pavel Begunkove2978222020-11-18 14:56:26 +00007604 data = ref_node->file_data;
7605 ctx = data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007606
Pavel Begunkove2978222020-11-18 14:56:26 +00007607 spin_lock(&data->lock);
7608 ref_node->done = true;
7609
7610 while (!list_empty(&data->ref_list)) {
7611 ref_node = list_first_entry(&data->ref_list,
7612 struct fixed_file_ref_node, node);
7613 /* recycle ref nodes in order */
7614 if (!ref_node->done)
7615 break;
7616 list_del(&ref_node->node);
7617 first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
7618 }
7619 spin_unlock(&data->lock);
7620
7621 if (percpu_ref_is_dying(&data->refs))
Jens Axboe4a38aed22020-05-14 17:21:15 -06007622 delay = 0;
7623
Jens Axboe4a38aed22020-05-14 17:21:15 -06007624 if (!delay)
7625 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
7626 else if (first_add)
7627 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007628}
7629
7630static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
7631 struct io_ring_ctx *ctx)
7632{
7633 struct fixed_file_ref_node *ref_node;
7634
7635 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7636 if (!ref_node)
7637 return ERR_PTR(-ENOMEM);
7638
7639 if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
7640 0, GFP_KERNEL)) {
7641 kfree(ref_node);
7642 return ERR_PTR(-ENOMEM);
7643 }
7644 INIT_LIST_HEAD(&ref_node->node);
7645 INIT_LIST_HEAD(&ref_node->file_list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007646 ref_node->file_data = ctx->file_data;
Pavel Begunkove2978222020-11-18 14:56:26 +00007647 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007648 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007649}
7650
7651static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
7652{
7653 percpu_ref_exit(&ref_node->refs);
7654 kfree(ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007655}
7656
7657static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7658 unsigned nr_args)
7659{
7660 __s32 __user *fds = (__s32 __user *) arg;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007661 unsigned nr_tables, i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007662 struct file *file;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007663 int fd, ret = -ENOMEM;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007664 struct fixed_file_ref_node *ref_node;
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007665 struct fixed_file_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007666
7667 if (ctx->file_data)
7668 return -EBUSY;
7669 if (!nr_args)
7670 return -EINVAL;
7671 if (nr_args > IORING_MAX_FIXED_FILES)
7672 return -EMFILE;
7673
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007674 file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
7675 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007676 return -ENOMEM;
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007677 file_data->ctx = ctx;
7678 init_completion(&file_data->done);
7679 INIT_LIST_HEAD(&file_data->ref_list);
7680 spin_lock_init(&file_data->lock);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007681
7682 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
Colin Ian King035fbaf2020-10-12 15:03:41 +01007683 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007684 GFP_KERNEL);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007685 if (!file_data->table)
7686 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007687
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007688 if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007689 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
7690 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007691
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007692 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
7693 goto out_ref;
Jens Axboe55cbc252020-10-14 07:35:57 -06007694 ctx->file_data = file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007695
7696 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
7697 struct fixed_file_table *table;
7698 unsigned index;
7699
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007700 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7701 ret = -EFAULT;
7702 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007703 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007704 /* allow sparse sets */
7705 if (fd == -1)
7706 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007707
Jens Axboe05f3fb32019-12-09 11:22:50 -07007708 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007709 ret = -EBADF;
7710 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007711 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007712
7713 /*
7714 * Don't allow io_uring instances to be registered. If UNIX
7715 * isn't enabled, then this causes a reference cycle and this
7716 * instance can never get freed. If UNIX is enabled we'll
7717 * handle it just fine, but there's still no point in allowing
7718 * a ring fd as it doesn't support regular read/write anyway.
7719 */
7720 if (file->f_op == &io_uring_fops) {
7721 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007722 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007723 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007724 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7725 index = i & IORING_FILE_TABLE_MASK;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007726 table->files[index] = file;
7727 }
7728
Jens Axboe05f3fb32019-12-09 11:22:50 -07007729 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007730 if (ret) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07007731 io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007732 return ret;
7733 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007734
Xiaoguang Wang05589552020-03-31 14:05:18 +08007735 ref_node = alloc_fixed_file_ref_node(ctx);
7736 if (IS_ERR(ref_node)) {
7737 io_sqe_files_unregister(ctx);
7738 return PTR_ERR(ref_node);
7739 }
7740
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007741 file_data->node = ref_node;
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007742 spin_lock(&file_data->lock);
Pavel Begunkove2978222020-11-18 14:56:26 +00007743 list_add_tail(&ref_node->node, &file_data->ref_list);
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007744 spin_unlock(&file_data->lock);
7745 percpu_ref_get(&file_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007746 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007747out_fput:
7748 for (i = 0; i < ctx->nr_user_files; i++) {
7749 file = io_file_from_index(ctx, i);
7750 if (file)
7751 fput(file);
7752 }
7753 for (i = 0; i < nr_tables; i++)
7754 kfree(file_data->table[i].files);
7755 ctx->nr_user_files = 0;
7756out_ref:
7757 percpu_ref_exit(&file_data->refs);
7758out_free:
7759 kfree(file_data->table);
7760 kfree(file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007761 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007762 return ret;
7763}
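/*
 * Illustrative userspace sketch (not part of this file): the table built by
 * io_sqe_files_register() is populated through the io_uring_register()
 * syscall with IORING_REGISTER_FILES.  A slot value of -1 leaves a sparse
 * entry that can be filled in later:
 *
 *	int fds[4] = { open("a.txt", O_RDONLY), -1, -1, -1 };
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, 4);
 *
 * Requests then set IOSQE_FIXED_FILE in sqe->flags and pass the slot index
 * in sqe->fd instead of a regular file descriptor.
 */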
7764
Jens Axboec3a31e62019-10-03 13:59:56 -06007765static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7766 int index)
7767{
7768#if defined(CONFIG_UNIX)
7769 struct sock *sock = ctx->ring_sock->sk;
7770 struct sk_buff_head *head = &sock->sk_receive_queue;
7771 struct sk_buff *skb;
7772
7773 /*
7774 * See if we can merge this file into an existing skb SCM_RIGHTS
7775 * file set. If there's no room, fall back to allocating a new skb
7776 * and filling it in.
7777 */
7778 spin_lock_irq(&head->lock);
7779 skb = skb_peek(head);
7780 if (skb) {
7781 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7782
7783 if (fpl->count < SCM_MAX_FD) {
7784 __skb_unlink(skb, head);
7785 spin_unlock_irq(&head->lock);
7786 fpl->fp[fpl->count] = get_file(file);
7787 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7788 fpl->count++;
7789 spin_lock_irq(&head->lock);
7790 __skb_queue_head(head, skb);
7791 } else {
7792 skb = NULL;
7793 }
7794 }
7795 spin_unlock_irq(&head->lock);
7796
7797 if (skb) {
7798 fput(file);
7799 return 0;
7800 }
7801
7802 return __io_sqe_files_scm(ctx, 1, index);
7803#else
7804 return 0;
7805#endif
7806}
7807
Hillf Dantona5318d32020-03-23 17:47:15 +08007808static int io_queue_file_removal(struct fixed_file_data *data,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007809 struct file *file)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007810{
Hillf Dantona5318d32020-03-23 17:47:15 +08007811 struct io_file_put *pfile;
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007812 struct fixed_file_ref_node *ref_node = data->node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007813
Jens Axboe05f3fb32019-12-09 11:22:50 -07007814 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
Hillf Dantona5318d32020-03-23 17:47:15 +08007815 if (!pfile)
7816 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007817
7818 pfile->file = file;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007819 list_add(&pfile->list, &ref_node->file_list);
7820
Hillf Dantona5318d32020-03-23 17:47:15 +08007821 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007822}
7823
7824static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7825 struct io_uring_files_update *up,
7826 unsigned nr_args)
7827{
7828 struct fixed_file_data *data = ctx->file_data;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007829 struct fixed_file_ref_node *ref_node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007830 struct file *file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007831 __s32 __user *fds;
7832 int fd, i, err;
7833 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007834 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007835
Jens Axboe05f3fb32019-12-09 11:22:50 -07007836 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007837 return -EOVERFLOW;
7838 if (done > ctx->nr_user_files)
7839 return -EINVAL;
7840
Xiaoguang Wang05589552020-03-31 14:05:18 +08007841 ref_node = alloc_fixed_file_ref_node(ctx);
7842 if (IS_ERR(ref_node))
7843 return PTR_ERR(ref_node);
7844
Jens Axboec3a31e62019-10-03 13:59:56 -06007845 done = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007846 fds = u64_to_user_ptr(up->fds);
Jens Axboec3a31e62019-10-03 13:59:56 -06007847 while (nr_args) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007848 struct fixed_file_table *table;
7849 unsigned index;
7850
Jens Axboec3a31e62019-10-03 13:59:56 -06007851 err = 0;
7852 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7853 err = -EFAULT;
7854 break;
7855 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007856 i = array_index_nospec(up->offset, ctx->nr_user_files);
7857 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
Jens Axboe65e19f52019-10-26 07:20:21 -06007858 index = i & IORING_FILE_TABLE_MASK;
7859 if (table->files[index]) {
Jiufei Xue98dfd502020-09-01 13:35:02 +08007860 file = table->files[index];
Hillf Dantona5318d32020-03-23 17:47:15 +08007861 err = io_queue_file_removal(data, file);
7862 if (err)
7863 break;
Jens Axboe65e19f52019-10-26 07:20:21 -06007864 table->files[index] = NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007865 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007866 }
7867 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007868 file = fget(fd);
7869 if (!file) {
7870 err = -EBADF;
7871 break;
7872 }
7873 /*
7874 * Don't allow io_uring instances to be registered. If
7875 * UNIX isn't enabled, then this causes a reference
7876 * cycle and this instance can never get freed. If UNIX
7877 * is enabled we'll handle it just fine, but there's
7878 * still no point in allowing a ring fd as it doesn't
7879 * support regular read/write anyway.
7880 */
7881 if (file->f_op == &io_uring_fops) {
7882 fput(file);
7883 err = -EBADF;
7884 break;
7885 }
Jens Axboe65e19f52019-10-26 07:20:21 -06007886 table->files[index] = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007887 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007888 if (err) {
Jiufei Xue95d1c8e2020-09-02 17:59:39 +08007889 table->files[index] = NULL;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007890 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007891 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007892 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007893 }
7894 nr_args--;
7895 done++;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007896 up->offset++;
7897 }
7898
Xiaoguang Wang05589552020-03-31 14:05:18 +08007899 if (needs_switch) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007900 percpu_ref_kill(&data->node->refs);
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007901 spin_lock(&data->lock);
Pavel Begunkove2978222020-11-18 14:56:26 +00007902 list_add_tail(&ref_node->node, &data->ref_list);
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007903 data->node = ref_node;
Jens Axboe6a4d07c2020-05-15 14:30:38 -06007904 spin_unlock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007905 percpu_ref_get(&ctx->file_data->refs);
7906 } else
7907 destroy_fixed_file_ref_node(ref_node);
Jens Axboec3a31e62019-10-03 13:59:56 -06007908
7909 return done ? done : err;
7910}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007911
Jens Axboe05f3fb32019-12-09 11:22:50 -07007912static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7913 unsigned nr_args)
7914{
7915 struct io_uring_files_update up;
7916
7917 if (!ctx->file_data)
7918 return -ENXIO;
7919 if (!nr_args)
7920 return -EINVAL;
7921 if (copy_from_user(&up, arg, sizeof(up)))
7922 return -EFAULT;
7923 if (up.resv)
7924 return -EINVAL;
7925
7926 return __io_sqe_files_update(ctx, &up, nr_args);
7927}
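/*
 * Illustrative userspace sketch (not part of this file): the update path
 * above is driven by IORING_REGISTER_FILES_UPDATE, which takes a struct
 * io_uring_files_update describing the starting slot and an array of new
 * fds (-1 just unregisters a slot):
 *
 *	int new_fd = open("b.txt", O_RDONLY);
 *	struct io_uring_files_update up = {
 *		.offset	= 1,				// first slot to update
 *		.fds	= (unsigned long) &new_fd,	// user pointer to fd array
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES_UPDATE, &up, 1);
 */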
Jens Axboec3a31e62019-10-03 13:59:56 -06007928
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007929static void io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007930{
7931 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7932
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007933 /* Consider that io_steal_work() relies on this ref */
Jens Axboe7d723062019-11-12 22:31:31 -07007934 io_put_req(req);
7935}
7936
Pavel Begunkov24369c22020-01-28 03:15:48 +03007937static int io_init_wq_offload(struct io_ring_ctx *ctx,
7938 struct io_uring_params *p)
7939{
7940 struct io_wq_data data;
7941 struct fd f;
7942 struct io_ring_ctx *ctx_attach;
7943 unsigned int concurrency;
7944 int ret = 0;
7945
7946 data.user = ctx->user;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007947 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007948 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007949
7950 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7951 /* Do QD, or 4 * CPUS, whichever is smaller */
7952 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7953
7954 ctx->io_wq = io_wq_create(concurrency, &data);
7955 if (IS_ERR(ctx->io_wq)) {
7956 ret = PTR_ERR(ctx->io_wq);
7957 ctx->io_wq = NULL;
7958 }
7959 return ret;
7960 }
7961
7962 f = fdget(p->wq_fd);
7963 if (!f.file)
7964 return -EBADF;
7965
7966 if (f.file->f_op != &io_uring_fops) {
7967 ret = -EINVAL;
7968 goto out_fput;
7969 }
7970
7971 ctx_attach = f.file->private_data;
7972 /* @io_wq is protected by holding the fd */
7973 if (!io_wq_get(ctx_attach->io_wq, &data)) {
7974 ret = -EINVAL;
7975 goto out_fput;
7976 }
7977
7978 ctx->io_wq = ctx_attach->io_wq;
7979out_fput:
7980 fdput(f);
7981 return ret;
7982}
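/*
 * Illustrative sketch (not part of this file): a second ring can share the
 * io-wq of an existing one by passing IORING_SETUP_ATTACH_WQ and the fd of
 * the first ring in io_uring_params.wq_fd, which is what the fdget() path
 * above implements; the fd must itself be an io_uring fd.
 *
 *	struct io_uring_params p = {
 *		.flags	= IORING_SETUP_ATTACH_WQ,
 *		.wq_fd	= existing_ring_fd,	// placeholder in this example
 *	};
 *	int fd = io_uring_setup(QUEUE_DEPTH, &p);
 */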
7983
Jens Axboe0f212202020-09-13 13:09:39 -06007984static int io_uring_alloc_task_context(struct task_struct *task)
7985{
7986 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007987 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007988
7989 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7990 if (unlikely(!tctx))
7991 return -ENOMEM;
7992
Jens Axboed8a6df12020-10-15 16:24:45 -06007993 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7994 if (unlikely(ret)) {
7995 kfree(tctx);
7996 return ret;
7997 }
7998
Jens Axboe0f212202020-09-13 13:09:39 -06007999 xa_init(&tctx->xa);
8000 init_waitqueue_head(&tctx->wait);
8001 tctx->last = NULL;
Jens Axboefdaf0832020-10-30 09:37:30 -06008002 atomic_set(&tctx->in_idle, 0);
8003 tctx->sqpoll = false;
Jens Axboe500a3732020-10-15 17:38:03 -06008004 io_init_identity(&tctx->__identity);
8005 tctx->identity = &tctx->__identity;
Jens Axboe0f212202020-09-13 13:09:39 -06008006 task->io_uring = tctx;
8007 return 0;
8008}
8009
8010void __io_uring_free(struct task_struct *tsk)
8011{
8012 struct io_uring_task *tctx = tsk->io_uring;
8013
8014 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Jens Axboe500a3732020-10-15 17:38:03 -06008015 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
8016 if (tctx->identity != &tctx->__identity)
8017 kfree(tctx->identity);
Jens Axboed8a6df12020-10-15 16:24:45 -06008018 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008019 kfree(tctx);
8020 tsk->io_uring = NULL;
8021}
8022
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008023static int io_sq_offload_create(struct io_ring_ctx *ctx,
8024 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008025{
8026 int ret;
8027
Jens Axboe6c271ce2019-01-10 11:22:30 -07008028 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe534ca6d2020-09-02 13:52:19 -06008029 struct io_sq_data *sqd;
8030
Jens Axboe3ec482d2019-04-08 10:51:01 -06008031 ret = -EPERM;
Jens Axboece59fc62020-09-02 13:28:09 -06008032 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
Jens Axboe3ec482d2019-04-08 10:51:01 -06008033 goto err;
8034
Jens Axboe534ca6d2020-09-02 13:52:19 -06008035 sqd = io_get_sq_data(p);
8036 if (IS_ERR(sqd)) {
8037 ret = PTR_ERR(sqd);
8038 goto err;
8039 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008040
Jens Axboe534ca6d2020-09-02 13:52:19 -06008041 ctx->sq_data = sqd;
Jens Axboe69fb2132020-09-14 11:16:23 -06008042 io_sq_thread_park(sqd);
8043 mutex_lock(&sqd->ctx_lock);
8044 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
8045 mutex_unlock(&sqd->ctx_lock);
8046 io_sq_thread_unpark(sqd);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008047
Jens Axboe917257d2019-04-13 09:28:55 -06008048 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8049 if (!ctx->sq_thread_idle)
8050 ctx->sq_thread_idle = HZ;
8051
Jens Axboeaa061652020-09-02 14:50:27 -06008052 if (sqd->thread)
8053 goto done;
8054
Jens Axboe6c271ce2019-01-10 11:22:30 -07008055 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008056 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008057
Jens Axboe917257d2019-04-13 09:28:55 -06008058 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06008059 if (cpu >= nr_cpu_ids)
8060 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08008061 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06008062 goto err;
8063
Jens Axboe69fb2132020-09-14 11:16:23 -06008064 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
Jens Axboe534ca6d2020-09-02 13:52:19 -06008065 cpu, "io_uring-sq");
Jens Axboe6c271ce2019-01-10 11:22:30 -07008066 } else {
Jens Axboe69fb2132020-09-14 11:16:23 -06008067 sqd->thread = kthread_create(io_sq_thread, sqd,
Jens Axboe6c271ce2019-01-10 11:22:30 -07008068 "io_uring-sq");
8069 }
Jens Axboe534ca6d2020-09-02 13:52:19 -06008070 if (IS_ERR(sqd->thread)) {
8071 ret = PTR_ERR(sqd->thread);
8072 sqd->thread = NULL;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008073 goto err;
8074 }
Jens Axboe534ca6d2020-09-02 13:52:19 -06008075 ret = io_uring_alloc_task_context(sqd->thread);
Jens Axboe0f212202020-09-13 13:09:39 -06008076 if (ret)
8077 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008078 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8079 /* Can't have SQ_AFF without SQPOLL */
8080 ret = -EINVAL;
8081 goto err;
8082 }
8083
Jens Axboeaa061652020-09-02 14:50:27 -06008084done:
Pavel Begunkov24369c22020-01-28 03:15:48 +03008085 ret = io_init_wq_offload(ctx, p);
8086 if (ret)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008087 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008088
8089 return 0;
8090err:
Jens Axboe54a91f32019-09-10 09:15:04 -06008091 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008092 return ret;
8093}
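/*
 * Illustrative sketch (not part of this file): the SQPOLL branch above is
 * reached when the ring is created with a setup like the following, which
 * also shows the optional CPU pinning (IORING_SETUP_SQ_AFF) and the idle
 * time this function converts to jiffies:
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF,
 *		.sq_thread_cpu	= 1,	// pin the io_uring-sq kthread to CPU 1
 *		.sq_thread_idle	= 2000,	// idle time, in milliseconds
 *	};
 *	int fd = io_uring_setup(QUEUE_DEPTH, &p);
 *
 * At the time of this code, creating an SQPOLL ring also requires
 * CAP_SYS_ADMIN or CAP_SYS_NICE, as checked at the top of this function.
 */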
8094
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008095static void io_sq_offload_start(struct io_ring_ctx *ctx)
8096{
Jens Axboe534ca6d2020-09-02 13:52:19 -06008097 struct io_sq_data *sqd = ctx->sq_data;
8098
8099 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
8100 wake_up_process(sqd->thread);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008101}
8102
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008103static inline void __io_unaccount_mem(struct user_struct *user,
8104 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008105{
8106 atomic_long_sub(nr_pages, &user->locked_vm);
8107}
8108
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008109static inline int __io_account_mem(struct user_struct *user,
8110 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008111{
8112 unsigned long page_limit, cur_pages, new_pages;
8113
8114 /* Don't allow more pages than we can safely lock */
8115 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8116
8117 do {
8118 cur_pages = atomic_long_read(&user->locked_vm);
8119 new_pages = cur_pages + nr_pages;
8120 if (new_pages > page_limit)
8121 return -ENOMEM;
8122 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8123 new_pages) != cur_pages);
8124
8125 return 0;
8126}
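/*
 * Example of the limit check above (values are illustrative): with the
 * common 64 KiB RLIMIT_MEMLOCK default and 4 KiB pages, page_limit is 16
 * pages; a registration that would push locked_vm past that fails with
 * -ENOMEM, and the cmpxchg loop simply retries if another thread raced an
 * update of user->locked_vm in the meantime.
 */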
8127
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008128static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8129 enum io_mem_account acct)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008130{
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07008131 if (ctx->limit_mem)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008132 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008133
Jens Axboe2aede0e2020-09-14 10:45:53 -06008134 if (ctx->mm_account) {
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008135 if (acct == ACCT_LOCKED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06008136 ctx->mm_account->locked_vm -= nr_pages;
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008137 else if (acct == ACCT_PINNED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06008138 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008139 }
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008140}
8141
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008142static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8143 enum io_mem_account acct)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008144{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008145 int ret;
8146
8147 if (ctx->limit_mem) {
8148 ret = __io_account_mem(ctx->user, nr_pages);
8149 if (ret)
8150 return ret;
8151 }
8152
Jens Axboe2aede0e2020-09-14 10:45:53 -06008153 if (ctx->mm_account) {
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008154 if (acct == ACCT_LOCKED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06008155 ctx->mm_account->locked_vm += nr_pages;
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008156 else if (acct == ACCT_PINNED)
Jens Axboe2aede0e2020-09-14 10:45:53 -06008157 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008158 }
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008159
8160 return 0;
8161}
8162
Jens Axboe2b188cc2019-01-07 10:46:33 -07008163static void io_mem_free(void *ptr)
8164{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008165 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008166
Mark Rutland52e04ef2019-04-30 17:30:21 +01008167 if (!ptr)
8168 return;
8169
8170 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008171 if (put_page_testzero(page))
8172 free_compound_page(page);
8173}
8174
8175static void *io_mem_alloc(size_t size)
8176{
8177 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8178 __GFP_NORETRY;
8179
8180 return (void *) __get_free_pages(gfp_flags, get_order(size));
8181}
8182
Hristo Venev75b28af2019-08-26 17:23:46 +00008183static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8184 size_t *sq_offset)
8185{
8186 struct io_rings *rings;
8187 size_t off, sq_array_size;
8188
8189 off = struct_size(rings, cqes, cq_entries);
8190 if (off == SIZE_MAX)
8191 return SIZE_MAX;
8192
8193#ifdef CONFIG_SMP
8194 off = ALIGN(off, SMP_CACHE_BYTES);
8195 if (off == 0)
8196 return SIZE_MAX;
8197#endif
8198
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008199 if (sq_offset)
8200 *sq_offset = off;
8201
Hristo Venev75b28af2019-08-26 17:23:46 +00008202 sq_array_size = array_size(sizeof(u32), sq_entries);
8203 if (sq_array_size == SIZE_MAX)
8204 return SIZE_MAX;
8205
8206 if (check_add_overflow(off, sq_array_size, &off))
8207 return SIZE_MAX;
8208
Hristo Venev75b28af2019-08-26 17:23:46 +00008209 return off;
8210}
8211
Jens Axboe2b188cc2019-01-07 10:46:33 -07008212static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
8213{
Hristo Venev75b28af2019-08-26 17:23:46 +00008214 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008215
Hristo Venev75b28af2019-08-26 17:23:46 +00008216 pages = (size_t)1 << get_order(
8217 rings_size(sq_entries, cq_entries, NULL));
8218 pages += (size_t)1 << get_order(
8219 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008220
Hristo Venev75b28af2019-08-26 17:23:46 +00008221 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008222}
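/*
 * Back-of-the-envelope example (assuming 4 KiB pages and the 64-byte SQE
 * layout): a ring with sq_entries == 128 needs 128 * 64 B = 8 KiB, i.e. two
 * pages, for the SQE array alone, plus the pages covering struct io_rings,
 * the CQE array and the SQ index array computed by rings_size().  This is
 * the amount later released via io_unaccount_mem() in
 * io_ring_ctx_wait_and_kill() below.
 */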
8223
Jens Axboeedafcce2019-01-09 09:16:05 -07008224static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
8225{
8226 int i, j;
8227
8228 if (!ctx->user_bufs)
8229 return -ENXIO;
8230
8231 for (i = 0; i < ctx->nr_user_bufs; i++) {
8232 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8233
8234 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008235 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07008236
Jens Axboede293932020-09-17 16:19:16 -06008237 if (imu->acct_pages)
8238 io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008239 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008240 imu->nr_bvecs = 0;
8241 }
8242
8243 kfree(ctx->user_bufs);
8244 ctx->user_bufs = NULL;
8245 ctx->nr_user_bufs = 0;
8246 return 0;
8247}
8248
8249static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8250 void __user *arg, unsigned index)
8251{
8252 struct iovec __user *src;
8253
8254#ifdef CONFIG_COMPAT
8255 if (ctx->compat) {
8256 struct compat_iovec __user *ciovs;
8257 struct compat_iovec ciov;
8258
8259 ciovs = (struct compat_iovec __user *) arg;
8260 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8261 return -EFAULT;
8262
Jens Axboed55e5f52019-12-11 16:12:15 -07008263 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008264 dst->iov_len = ciov.iov_len;
8265 return 0;
8266 }
8267#endif
8268 src = (struct iovec __user *) arg;
8269 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8270 return -EFAULT;
8271 return 0;
8272}
8273
Jens Axboede293932020-09-17 16:19:16 -06008274/*
8275 * Not super efficient, but this only happens at registration time. And we do cache
8276 * the last compound head, so generally we'll only do a full search if we don't
8277 * match that one.
8278 *
8279 * We check if the given compound head page has already been accounted, to
8280 * avoid double accounting it. This allows us to account the full size of the
8281 * page, not just the constituent pages of a huge page.
8282 */
8283static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8284 int nr_pages, struct page *hpage)
8285{
8286 int i, j;
8287
8288 /* check current page array */
8289 for (i = 0; i < nr_pages; i++) {
8290 if (!PageCompound(pages[i]))
8291 continue;
8292 if (compound_head(pages[i]) == hpage)
8293 return true;
8294 }
8295
8296 /* check previously registered pages */
8297 for (i = 0; i < ctx->nr_user_bufs; i++) {
8298 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8299
8300 for (j = 0; j < imu->nr_bvecs; j++) {
8301 if (!PageCompound(imu->bvec[j].bv_page))
8302 continue;
8303 if (compound_head(imu->bvec[j].bv_page) == hpage)
8304 return true;
8305 }
8306 }
8307
8308 return false;
8309}
8310
8311static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8312 int nr_pages, struct io_mapped_ubuf *imu,
8313 struct page **last_hpage)
8314{
8315 int i, ret;
8316
8317 for (i = 0; i < nr_pages; i++) {
8318 if (!PageCompound(pages[i])) {
8319 imu->acct_pages++;
8320 } else {
8321 struct page *hpage;
8322
8323 hpage = compound_head(pages[i]);
8324 if (hpage == *last_hpage)
8325 continue;
8326 *last_hpage = hpage;
8327 if (headpage_already_acct(ctx, pages, i, hpage))
8328 continue;
8329 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8330 }
8331 }
8332
8333 if (!imu->acct_pages)
8334 return 0;
8335
8336 ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
8337 if (ret)
8338 imu->acct_pages = 0;
8339 return ret;
8340}
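/*
 * Worked example of the accounting above: a buffer backed by one 2 MiB huge
 * page on a 4 KiB-page system charges page_size(hpage) >> PAGE_SHIFT == 512
 * pages exactly once, even though many bvec entries reference the same
 * compound head; headpage_already_acct() is what prevents the double count.
 */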
8341
Jens Axboeedafcce2019-01-09 09:16:05 -07008342static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
8343 unsigned nr_args)
8344{
8345 struct vm_area_struct **vmas = NULL;
8346 struct page **pages = NULL;
Jens Axboede293932020-09-17 16:19:16 -06008347 struct page *last_hpage = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008348 int i, j, got_pages = 0;
8349 int ret = -EINVAL;
8350
8351 if (ctx->user_bufs)
8352 return -EBUSY;
8353 if (!nr_args || nr_args > UIO_MAXIOV)
8354 return -EINVAL;
8355
8356 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8357 GFP_KERNEL);
8358 if (!ctx->user_bufs)
8359 return -ENOMEM;
8360
8361 for (i = 0; i < nr_args; i++) {
8362 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8363 unsigned long off, start, end, ubuf;
8364 int pret, nr_pages;
8365 struct iovec iov;
8366 size_t size;
8367
8368 ret = io_copy_iov(ctx, &iov, arg, i);
8369 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03008370 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07008371
8372 /*
8373 * Don't impose further limits on the size and buffer
8374 * constraints here; we'll -EINVAL later when IO is
8375 * submitted if they are wrong.
8376 */
8377 ret = -EFAULT;
8378 if (!iov.iov_base || !iov.iov_len)
8379 goto err;
8380
8381 /* arbitrary limit, but we need something */
8382 if (iov.iov_len > SZ_1G)
8383 goto err;
8384
8385 ubuf = (unsigned long) iov.iov_base;
8386 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8387 start = ubuf >> PAGE_SHIFT;
8388 nr_pages = end - start;
8389
Jens Axboeedafcce2019-01-09 09:16:05 -07008390 ret = 0;
8391 if (!pages || nr_pages > got_pages) {
Denis Efremova8c73c12020-06-05 12:32:03 +03008392 kvfree(vmas);
8393 kvfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008394 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07008395 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008396 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07008397 sizeof(struct vm_area_struct *),
8398 GFP_KERNEL);
8399 if (!pages || !vmas) {
8400 ret = -ENOMEM;
Jens Axboeedafcce2019-01-09 09:16:05 -07008401 goto err;
8402 }
8403 got_pages = nr_pages;
8404 }
8405
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008406 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07008407 GFP_KERNEL);
8408 ret = -ENOMEM;
Jens Axboede293932020-09-17 16:19:16 -06008409 if (!imu->bvec)
Jens Axboeedafcce2019-01-09 09:16:05 -07008410 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07008411
8412 ret = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07008413 mmap_read_lock(current->mm);
John Hubbard2113b052020-01-30 22:13:13 -08008414 pret = pin_user_pages(ubuf, nr_pages,
Ira Weiny932f4a62019-05-13 17:17:03 -07008415 FOLL_WRITE | FOLL_LONGTERM,
8416 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07008417 if (pret == nr_pages) {
8418 /* don't support file backed memory */
8419 for (j = 0; j < nr_pages; j++) {
8420 struct vm_area_struct *vma = vmas[j];
8421
8422 if (vma->vm_file &&
8423 !is_file_hugepages(vma->vm_file)) {
8424 ret = -EOPNOTSUPP;
8425 break;
8426 }
8427 }
8428 } else {
8429 ret = pret < 0 ? pret : -EFAULT;
8430 }
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07008431 mmap_read_unlock(current->mm);
Jens Axboeedafcce2019-01-09 09:16:05 -07008432 if (ret) {
8433 /*
8434 * if we did a partial map, or found file-backed vmas,
8435 * release any pages we did get
8436 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07008437 if (pret > 0)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008438 unpin_user_pages(pages, pret);
Jens Axboede293932020-09-17 16:19:16 -06008439 kvfree(imu->bvec);
8440 goto err;
8441 }
8442
8443 ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
8444 if (ret) {
8445 unpin_user_pages(pages, pret);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008446 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008447 goto err;
8448 }
8449
8450 off = ubuf & ~PAGE_MASK;
8451 size = iov.iov_len;
8452 for (j = 0; j < nr_pages; j++) {
8453 size_t vec_len;
8454
8455 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8456 imu->bvec[j].bv_page = pages[j];
8457 imu->bvec[j].bv_len = vec_len;
8458 imu->bvec[j].bv_offset = off;
8459 off = 0;
8460 size -= vec_len;
8461 }
8462 /* store original address for later verification */
8463 imu->ubuf = ubuf;
8464 imu->len = iov.iov_len;
8465 imu->nr_bvecs = nr_pages;
8466
8467 ctx->nr_user_bufs++;
8468 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008469 kvfree(pages);
8470 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07008471 return 0;
8472err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008473 kvfree(pages);
8474 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07008475 io_sqe_buffer_unregister(ctx);
8476 return ret;
8477}
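/*
 * Illustrative userspace sketch (not part of this file): buffers are handed
 * to io_sqe_buffer_register() as an iovec array via IORING_REGISTER_BUFFERS;
 * they are pinned with FOLL_LONGTERM above and later referenced by index:
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 64 * 1024);
 *	struct iovec iov = { .iov_base = buf, .iov_len = 64 * 1024 };
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED requests then set
 * sqe->buf_index to pick one of the registered buffers.
 */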
8478
Jens Axboe9b402842019-04-11 11:45:41 -06008479static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8480{
8481 __s32 __user *fds = arg;
8482 int fd;
8483
8484 if (ctx->cq_ev_fd)
8485 return -EBUSY;
8486
8487 if (copy_from_user(&fd, fds, sizeof(*fds)))
8488 return -EFAULT;
8489
8490 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8491 if (IS_ERR(ctx->cq_ev_fd)) {
8492 int ret = PTR_ERR(ctx->cq_ev_fd);
8493 ctx->cq_ev_fd = NULL;
8494 return ret;
8495 }
8496
8497 return 0;
8498}
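/*
 * Illustrative userspace sketch (not part of this file): the eventfd stored
 * here is registered with IORING_REGISTER_EVENTFD and is signalled when
 * completions are posted, so the ring can be wired into an epoll/event loop:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_EVENTFD, &efd, 1);
 */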
8499
8500static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8501{
8502 if (ctx->cq_ev_fd) {
8503 eventfd_ctx_put(ctx->cq_ev_fd);
8504 ctx->cq_ev_fd = NULL;
8505 return 0;
8506 }
8507
8508 return -ENXIO;
8509}
8510
Jens Axboe5a2e7452020-02-23 16:23:11 -07008511static int __io_destroy_buffers(int id, void *p, void *data)
8512{
8513 struct io_ring_ctx *ctx = data;
8514 struct io_buffer *buf = p;
8515
Jens Axboe067524e2020-03-02 16:32:28 -07008516 __io_remove_buffers(ctx, buf, id, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008517 return 0;
8518}
8519
8520static void io_destroy_buffers(struct io_ring_ctx *ctx)
8521{
8522 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8523 idr_destroy(&ctx->io_buffer_idr);
8524}
8525
Jens Axboe2b188cc2019-01-07 10:46:33 -07008526static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8527{
Jens Axboe6b063142019-01-10 22:13:58 -07008528 io_finish_async(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07008529 io_sqe_buffer_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008530
8531 if (ctx->sqo_task) {
8532 put_task_struct(ctx->sqo_task);
8533 ctx->sqo_task = NULL;
8534 mmdrop(ctx->mm_account);
8535 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008536 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008537
Dennis Zhou91d8f512020-09-16 13:41:05 -07008538#ifdef CONFIG_BLK_CGROUP
8539 if (ctx->sqo_blkcg_css)
8540 css_put(ctx->sqo_blkcg_css);
8541#endif
8542
Jens Axboe6b063142019-01-10 22:13:58 -07008543 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06008544 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008545 io_destroy_buffers(ctx);
Jens Axboe41726c92020-02-23 13:11:42 -07008546 idr_destroy(&ctx->personality_idr);
Jens Axboedef596e2019-01-09 08:59:42 -07008547
Jens Axboe2b188cc2019-01-07 10:46:33 -07008548#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008549 if (ctx->ring_sock) {
8550 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008551 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008552 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008553#endif
8554
Hristo Venev75b28af2019-08-26 17:23:46 +00008555 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008556 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008557
8558 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008559 free_uid(ctx->user);
Jens Axboe181e4482019-11-25 08:52:30 -07008560 put_cred(ctx->creds);
Jens Axboe78076bb2019-12-04 19:56:40 -07008561 kfree(ctx->cancel_hash);
Jens Axboe0ddf92e2019-11-08 08:52:53 -07008562 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008563 kfree(ctx);
8564}
8565
8566static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8567{
8568 struct io_ring_ctx *ctx = file->private_data;
8569 __poll_t mask = 0;
8570
8571 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008572 /*
8573 * synchronizes with barrier from wq_has_sleeper call in
8574 * io_commit_cqring
8575 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008576 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008577 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008578 mask |= EPOLLOUT | EPOLLWRNORM;
Stefano Garzarella63e5d812020-02-07 13:18:28 +01008579 if (io_cqring_events(ctx, false))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008580 mask |= EPOLLIN | EPOLLRDNORM;
8581
8582 return mask;
8583}
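/*
 * Illustrative userspace sketch (not part of this file): because of the
 * poll callback above, the ring fd itself can be waited on; EPOLLIN means
 * completions are ready and EPOLLOUT means there is SQ space:
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// block until at least one CQE is available
 */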
8584
8585static int io_uring_fasync(int fd, struct file *file, int on)
8586{
8587 struct io_ring_ctx *ctx = file->private_data;
8588
8589 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8590}
8591
Jens Axboe071698e2020-01-28 10:04:42 -07008592static int io_remove_personalities(int id, void *p, void *data)
8593{
8594 struct io_ring_ctx *ctx = data;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008595 struct io_identity *iod;
Jens Axboe071698e2020-01-28 10:04:42 -07008596
Jens Axboe1e6fa522020-10-15 08:46:24 -06008597 iod = idr_remove(&ctx->personality_idr, id);
8598 if (iod) {
8599 put_cred(iod->creds);
8600 if (refcount_dec_and_test(&iod->count))
8601 kfree(iod);
8602 }
Jens Axboe071698e2020-01-28 10:04:42 -07008603 return 0;
8604}
8605
Jens Axboe85faa7b2020-04-09 18:14:00 -06008606static void io_ring_exit_work(struct work_struct *work)
8607{
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008608 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8609 exit_work);
Jens Axboe85faa7b2020-04-09 18:14:00 -06008610
Jens Axboe56952e92020-06-17 15:00:04 -06008611 /*
8612 * If we're doing polled IO and end up having requests being
8613 * submitted async (out-of-line), then completions can come in while
8614 * we're waiting for refs to drop. We need to reap these manually,
8615 * as nobody else will be looking for them.
8616 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008617 do {
Jens Axboe56952e92020-06-17 15:00:04 -06008618 if (ctx->rings)
Jens Axboee6c8aa92020-09-28 13:10:13 -06008619 io_cqring_overflow_flush(ctx, true, NULL, NULL);
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008620 io_iopoll_try_reap_events(ctx);
8621 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Jens Axboe85faa7b2020-04-09 18:14:00 -06008622 io_ring_ctx_free(ctx);
8623}
8624
Jens Axboe2b188cc2019-01-07 10:46:33 -07008625static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8626{
8627 mutex_lock(&ctx->uring_lock);
8628 percpu_ref_kill(&ctx->refs);
8629 mutex_unlock(&ctx->uring_lock);
8630
Jens Axboef3606e32020-09-22 08:18:24 -06008631 io_kill_timeouts(ctx, NULL);
8632 io_poll_remove_all(ctx, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008633
8634 if (ctx->io_wq)
8635 io_wq_cancel_all(ctx->io_wq);
8636
Jens Axboe15dff282019-11-13 09:09:23 -07008637 /* if we failed setting up the ctx, we might not have any rings */
8638 if (ctx->rings)
Jens Axboee6c8aa92020-09-28 13:10:13 -06008639 io_cqring_overflow_flush(ctx, true, NULL, NULL);
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008640 io_iopoll_try_reap_events(ctx);
Jens Axboe071698e2020-01-28 10:04:42 -07008641 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008642
8643 /*
8644 * Do this upfront, so we won't have a grace period where the ring
8645 * is closed but resources aren't reaped yet. This can cause
8646 * spurious failure in setting up a new ring.
8647 */
Jens Axboe760618f2020-07-24 12:53:31 -06008648 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
8649 ACCT_LOCKED);
Jens Axboe309fc032020-07-10 09:13:34 -06008650
Jens Axboe85faa7b2020-04-09 18:14:00 -06008651 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008652 /*
8653 * Use system_unbound_wq to avoid spawning tons of event kworkers
8654 * if we're exiting a ton of rings at the same time. It just adds
8655 * noise and overhead; there's no discernible change in runtime
8656 * over using system_wq.
8657 */
8658 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008659}
8660
8661static int io_uring_release(struct inode *inode, struct file *file)
8662{
8663 struct io_ring_ctx *ctx = file->private_data;
8664
8665 file->private_data = NULL;
8666 io_ring_ctx_wait_and_kill(ctx);
8667 return 0;
8668}
8669
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008670static bool io_wq_files_match(struct io_wq_work *work, void *data)
8671{
8672 struct files_struct *files = data;
8673
Jens Axboedfead8a2020-10-14 10:12:37 -06008674 return !files || ((work->flags & IO_WQ_WORK_FILES) &&
Jens Axboe98447d62020-10-14 10:48:51 -06008675 work->identity->files == files);
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008676}
8677
Jens Axboef254ac02020-08-12 17:33:30 -06008678/*
8679 * Returns true if 'preq' is the link parent of 'req'
8680 */
8681static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
8682{
8683 struct io_kiocb *link;
8684
8685 if (!(preq->flags & REQ_F_LINK_HEAD))
8686 return false;
8687
8688 list_for_each_entry(link, &preq->link_list, link_list) {
8689 if (link == req)
8690 return true;
8691 }
8692
8693 return false;
8694}
8695
8696/*
8697 * We're looking to cancel 'req' because it's holding on to our files, but
8698 * 'req' could be a link to another request. See if it is, and cancel that
8699 * parent request if so.
8700 */
8701static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
8702{
8703 struct hlist_node *tmp;
8704 struct io_kiocb *preq;
8705 bool found = false;
8706 int i;
8707
8708 spin_lock_irq(&ctx->completion_lock);
8709 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
8710 struct hlist_head *list;
8711
8712 list = &ctx->cancel_hash[i];
8713 hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
8714 found = io_match_link(preq, req);
8715 if (found) {
8716 io_poll_remove_one(preq);
8717 break;
8718 }
8719 }
8720 }
8721 spin_unlock_irq(&ctx->completion_lock);
8722 return found;
8723}
8724
8725static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
8726 struct io_kiocb *req)
8727{
8728 struct io_kiocb *preq;
8729 bool found = false;
8730
8731 spin_lock_irq(&ctx->completion_lock);
8732 list_for_each_entry(preq, &ctx->timeout_list, timeout.list) {
8733 found = io_match_link(preq, req);
8734 if (found) {
8735 __io_timeout_cancel(preq);
8736 break;
8737 }
8738 }
8739 spin_unlock_irq(&ctx->completion_lock);
8740 return found;
8741}
8742
Jens Axboeb711d4e2020-08-16 08:23:05 -07008743static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
8744{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008745 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8746 bool ret;
8747
8748 if (req->flags & REQ_F_LINK_TIMEOUT) {
8749 unsigned long flags;
8750 struct io_ring_ctx *ctx = req->ctx;
8751
8752 /* protect against races with linked timeouts */
8753 spin_lock_irqsave(&ctx->completion_lock, flags);
8754 ret = io_match_link(req, data);
8755 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8756 } else {
8757 ret = io_match_link(req, data);
8758 }
8759 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008760}
8761
8762static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
8763{
8764 enum io_wq_cancel cret;
8765
8766 /* cancel this particular work, if it's running */
8767 cret = io_wq_cancel_work(ctx->io_wq, &req->work);
8768 if (cret != IO_WQ_CANCEL_NOTFOUND)
8769 return;
8770
8771 /* find links that hold this pending, cancel those */
8772 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
8773 if (cret != IO_WQ_CANCEL_NOTFOUND)
8774 return;
8775
8776 /* if we have a poll link holding this pending, cancel that */
8777 if (io_poll_remove_link(ctx, req))
8778 return;
8779
8780 /* final option, timeout link is holding this req pending */
8781 io_timeout_remove_link(ctx, req);
8782}
8783
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008784static void io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008785 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008786 struct files_struct *files)
8787{
8788 struct io_defer_entry *de = NULL;
8789 LIST_HEAD(list);
8790
8791 spin_lock_irq(&ctx->completion_lock);
8792 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008793 if (io_task_match(de->req, task) &&
8794 io_match_files(de->req, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008795 list_cut_position(&list, &ctx->defer_list, &de->list);
8796 break;
8797 }
8798 }
8799 spin_unlock_irq(&ctx->completion_lock);
8800
8801 while (!list_empty(&list)) {
8802 de = list_first_entry(&list, struct io_defer_entry, list);
8803 list_del_init(&de->list);
8804 req_set_fail_links(de->req);
8805 io_put_req(de->req);
8806 io_req_complete(de->req, -ECANCELED);
8807 kfree(de);
8808 }
8809}
8810
Jens Axboe76e1b642020-09-26 15:05:03 -06008811/*
8812 * Returns true if we found and killed one or more requests pinning the given files
8813 */
8814static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
Jens Axboefcb323c2019-10-24 12:39:47 -06008815 struct files_struct *files)
8816{
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008817 if (list_empty_careful(&ctx->inflight_list))
Jens Axboe76e1b642020-09-26 15:05:03 -06008818 return false;
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008819
8820 /* cancel all at once, should be faster than doing it one by one */
8821 io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
8822
Jens Axboefcb323c2019-10-24 12:39:47 -06008823 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008824 struct io_kiocb *cancel_req = NULL, *req;
8825 DEFINE_WAIT(wait);
Jens Axboefcb323c2019-10-24 12:39:47 -06008826
8827 spin_lock_irq(&ctx->inflight_lock);
8828 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
Jens Axboedfead8a2020-10-14 10:12:37 -06008829 if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
Jens Axboe98447d62020-10-14 10:48:51 -06008830 req->work.identity->files != files)
Jens Axboe768134d2019-11-10 20:30:53 -07008831 continue;
8832 /* req is being completed, ignore */
8833 if (!refcount_inc_not_zero(&req->refs))
8834 continue;
8835 cancel_req = req;
8836 break;
Jens Axboefcb323c2019-10-24 12:39:47 -06008837 }
Jens Axboe768134d2019-11-10 20:30:53 -07008838 if (cancel_req)
Jens Axboefcb323c2019-10-24 12:39:47 -06008839 prepare_to_wait(&ctx->inflight_wait, &wait,
Jens Axboe768134d2019-11-10 20:30:53 -07008840 TASK_UNINTERRUPTIBLE);
Jens Axboefcb323c2019-10-24 12:39:47 -06008841 spin_unlock_irq(&ctx->inflight_lock);
8842
Jens Axboe768134d2019-11-10 20:30:53 -07008843 /* We need to keep going until we don't find a matching req */
8844 if (!cancel_req)
Jens Axboefcb323c2019-10-24 12:39:47 -06008845 break;
Pavel Begunkovbb175342020-08-20 11:33:35 +03008846 /* cancel this request, or head link requests */
8847 io_attempt_cancel(ctx, cancel_req);
8848 io_put_req(cancel_req);
Jens Axboe6200b0a2020-09-13 14:38:30 -06008849 /* cancellations _may_ trigger task work */
8850 io_run_task_work();
Jens Axboefcb323c2019-10-24 12:39:47 -06008851 schedule();
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008852 finish_wait(&ctx->inflight_wait, &wait);
Jens Axboefcb323c2019-10-24 12:39:47 -06008853 }
Jens Axboe76e1b642020-09-26 15:05:03 -06008854
8855 return true;
Jens Axboefcb323c2019-10-24 12:39:47 -06008856}
8857
Pavel Begunkov801dd572020-06-15 10:33:14 +03008858static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008859{
Pavel Begunkov801dd572020-06-15 10:33:14 +03008860 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8861 struct task_struct *task = data;
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008862
Jens Axboef3606e32020-09-22 08:18:24 -06008863 return io_task_match(req, task);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008864}
8865
Jens Axboe0f212202020-09-13 13:09:39 -06008866static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8867 struct task_struct *task,
8868 struct files_struct *files)
8869{
8870 bool ret;
8871
8872 ret = io_uring_cancel_files(ctx, files);
8873 if (!files) {
8874 enum io_wq_cancel cret;
8875
8876 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
8877 if (cret != IO_WQ_CANCEL_NOTFOUND)
8878 ret = true;
8879
8880 /* SQPOLL thread does its own polling */
8881 if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
8882 while (!list_empty_careful(&ctx->iopoll_list)) {
8883 io_iopoll_try_reap_events(ctx);
8884 ret = true;
8885 }
8886 }
8887
8888 ret |= io_poll_remove_all(ctx, task);
8889 ret |= io_kill_timeouts(ctx, task);
8890 }
8891
8892 return ret;
8893}
8894
8895/*
8896 * We need to iteratively cancel requests, in case a request has dependent
8897 * hard links. These persist even when a cancelation fails, so keep
8898 * looping until none are found.
8899 */
8900static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8901 struct files_struct *files)
8902{
8903 struct task_struct *task = current;
8904
Jens Axboefdaf0832020-10-30 09:37:30 -06008905 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
Jens Axboe534ca6d2020-09-02 13:52:19 -06008906 task = ctx->sq_data->thread;
Jens Axboefdaf0832020-10-30 09:37:30 -06008907 atomic_inc(&task->io_uring->in_idle);
8908 io_sq_thread_park(ctx->sq_data);
8909 }
Jens Axboe0f212202020-09-13 13:09:39 -06008910
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008911 if (files)
8912 io_cancel_defer_files(ctx, NULL, files);
8913 else
8914 io_cancel_defer_files(ctx, task, NULL);
8915
Jens Axboe0f212202020-09-13 13:09:39 -06008916 io_cqring_overflow_flush(ctx, true, task, files);
8917
8918 while (__io_uring_cancel_task_requests(ctx, task, files)) {
8919 io_run_task_work();
8920 cond_resched();
8921 }
Jens Axboefdaf0832020-10-30 09:37:30 -06008922
8923 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
8924 atomic_dec(&task->io_uring->in_idle);
8925 /*
8926 * If the files that are going away are the ones in the thread
8927 * identity, clear them out.
8928 */
8929 if (task->io_uring->identity->files == files)
8930 task->io_uring->identity->files = NULL;
8931 io_sq_thread_unpark(ctx->sq_data);
8932 }
Jens Axboe0f212202020-09-13 13:09:39 -06008933}
8934
8935/*
8936 * Note that this task has used io_uring. We use it for cancelation purposes.
8937 */
Jens Axboefdaf0832020-10-30 09:37:30 -06008938static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
Jens Axboe0f212202020-09-13 13:09:39 -06008939{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008940 struct io_uring_task *tctx = current->io_uring;
8941
8942 if (unlikely(!tctx)) {
Jens Axboe0f212202020-09-13 13:09:39 -06008943 int ret;
8944
8945 ret = io_uring_alloc_task_context(current);
8946 if (unlikely(ret))
8947 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008948 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008949 }
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008950 if (tctx->last != file) {
8951 void *old = xa_load(&tctx->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06008952
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008953 if (!old) {
Jens Axboe0f212202020-09-13 13:09:39 -06008954 get_file(file);
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008955 xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06008956 }
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008957 tctx->last = file;
Jens Axboe0f212202020-09-13 13:09:39 -06008958 }
8959
Jens Axboefdaf0832020-10-30 09:37:30 -06008960 /*
8961	 * This is race safe: the task itself is doing the update, so it cannot
8962	 * be going through the exit/cancel paths at the same time, and this
8963	 * flag is not modified while exit/cancel is running.
8964 */
8965 if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
8966 tctx->sqpoll = true;
8967
Jens Axboe0f212202020-09-13 13:09:39 -06008968 return 0;
8969}
8970
8971/*
8972 * Remove this io_uring_file -> task mapping.
8973 */
8974static void io_uring_del_task_file(struct file *file)
8975{
8976 struct io_uring_task *tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008977
8978 if (tctx->last == file)
8979 tctx->last = NULL;
Matthew Wilcox (Oracle)5e2ed8c2020-10-09 13:49:53 +01008980 file = xa_erase(&tctx->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06008981 if (file)
8982 fput(file);
8983}
8984
Jens Axboe0f212202020-09-13 13:09:39 -06008985/*
8986 * Drop the task's note for this file if we're the only ones that would
8987 * hold it after the pending fput().
8988 */
Pavel Begunkovc8fb20b2020-10-22 16:38:27 +01008989static void io_uring_attempt_task_drop(struct file *file)
Jens Axboe0f212202020-09-13 13:09:39 -06008990{
8991 if (!current->io_uring)
8992 return;
8993 /*
8994	 * fput() is pending; the count will be 2 if the only other ref is our potential
8995 * task file note. If the task is exiting, drop regardless of count.
8996 */
Pavel Begunkovc8fb20b2020-10-22 16:38:27 +01008997 if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
8998 atomic_long_read(&file->f_count) == 2)
8999 io_uring_del_task_file(file);
Jens Axboe0f212202020-09-13 13:09:39 -06009000}
9001
9002void __io_uring_files_cancel(struct files_struct *files)
9003{
9004 struct io_uring_task *tctx = current->io_uring;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01009005 struct file *file;
9006 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06009007
9008 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06009009 atomic_inc(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06009010
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01009011 xa_for_each(&tctx->xa, index, file) {
9012 struct io_ring_ctx *ctx = file->private_data;
Jens Axboe0f212202020-09-13 13:09:39 -06009013
9014 io_uring_cancel_task_requests(ctx, files);
9015 if (files)
9016 io_uring_del_task_file(file);
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01009017 }
Jens Axboefdaf0832020-10-30 09:37:30 -06009018
9019 atomic_dec(&tctx->in_idle);
9020}
9021
9022static s64 tctx_inflight(struct io_uring_task *tctx)
9023{
9024 unsigned long index;
9025 struct file *file;
9026 s64 inflight;
9027
9028 inflight = percpu_counter_sum(&tctx->inflight);
9029 if (!tctx->sqpoll)
9030 return inflight;
9031
9032 /*
9033 * If we have SQPOLL rings, then we need to iterate and find them, and
9034 * add the pending count for those.
9035 */
9036 xa_for_each(&tctx->xa, index, file) {
9037 struct io_ring_ctx *ctx = file->private_data;
9038
9039 if (ctx->flags & IORING_SETUP_SQPOLL) {
9040 struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
9041
9042 inflight += percpu_counter_sum(&__tctx->inflight);
9043 }
9044 }
9045
9046 return inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06009047}
9048
Jens Axboe0f212202020-09-13 13:09:39 -06009049/*
9050 * Find any io_uring fd that this task has registered or done IO on, and cancel
9051 * requests.
9052 */
9053void __io_uring_task_cancel(void)
9054{
9055 struct io_uring_task *tctx = current->io_uring;
9056 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009057 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06009058
9059 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06009060 atomic_inc(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06009061
Jens Axboed8a6df12020-10-15 16:24:45 -06009062 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009063 /* read completions before cancelations */
Jens Axboefdaf0832020-10-30 09:37:30 -06009064 inflight = tctx_inflight(tctx);
Jens Axboed8a6df12020-10-15 16:24:45 -06009065 if (!inflight)
9066 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009067 __io_uring_files_cancel(NULL);
9068
9069 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9070
9071 /*
9072 * If we've seen completions, retry. This avoids a race where
9073 * a completion comes in before we did prepare_to_wait().
9074 */
Jens Axboefdaf0832020-10-30 09:37:30 -06009075 if (inflight != tctx_inflight(tctx))
Jens Axboe0f212202020-09-13 13:09:39 -06009076 continue;
Jens Axboe0f212202020-09-13 13:09:39 -06009077 schedule();
Jens Axboed8a6df12020-10-15 16:24:45 -06009078 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06009079
9080 finish_wait(&tctx->wait, &wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009081 atomic_dec(&tctx->in_idle);
Jens Axboefcb323c2019-10-24 12:39:47 -06009082}
9083
9084static int io_uring_flush(struct file *file, void *data)
9085{
Pavel Begunkovc8fb20b2020-10-22 16:38:27 +01009086 io_uring_attempt_task_drop(file);
Jens Axboefcb323c2019-10-24 12:39:47 -06009087 return 0;
9088}
9089
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009090static void *io_uring_validate_mmap_request(struct file *file,
9091 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009092{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009093 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009094 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009095 struct page *page;
9096 void *ptr;
9097
9098 switch (offset) {
9099 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009100 case IORING_OFF_CQ_RING:
9101 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009102 break;
9103 case IORING_OFF_SQES:
9104 ptr = ctx->sq_sqes;
9105 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009106 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009107 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009108 }
9109
9110 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009111 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009112 return ERR_PTR(-EINVAL);
9113
9114 return ptr;
9115}
9116
9117#ifdef CONFIG_MMU
9118
9119static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9120{
9121 size_t sz = vma->vm_end - vma->vm_start;
9122 unsigned long pfn;
9123 void *ptr;
9124
9125 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9126 if (IS_ERR(ptr))
9127 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009128
9129 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9130 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9131}
9132
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009133#else /* !CONFIG_MMU */
9134
9135static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9136{
9137 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9138}
9139
9140static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9141{
9142 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9143}
9144
9145static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9146 unsigned long addr, unsigned long len,
9147 unsigned long pgoff, unsigned long flags)
9148{
9149 void *ptr;
9150
9151 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9152 if (IS_ERR(ptr))
9153 return PTR_ERR(ptr);
9154
9155 return (unsigned long) ptr;
9156}
9157
9158#endif /* !CONFIG_MMU */
9159
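For reference, a minimal userspace sketch (not part of this file) of how the three offsets validated above are consumed: after io_uring_setup() the application mmaps the SQ ring, CQ ring and SQE array at IORING_OFF_SQ_RING, IORING_OFF_CQ_RING and IORING_OFF_SQES, using the sizes reported in io_uring_params. It assumes <linux/io_uring.h> and a libc that exposes __NR_io_uring_setup; error handling and the IORING_FEAT_SINGLE_MMAP optimization are left out.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_rings(unsigned entries)
{
	struct io_uring_params p;
	void *sq_ring, *cq_ring, *sqes;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, entries, &p);
	if (fd < 0)
		return -1;

	/* SQ ring header plus the array of sqe indices */
	sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       fd, IORING_OFF_SQ_RING);
	/* CQ ring header plus the cqe array itself */
	cq_ring = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       fd, IORING_OFF_CQ_RING);
	/* The sqes, indexed indirectly through the SQ ring's index array */
	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		    fd, IORING_OFF_SQES);
	if (sq_ring == MAP_FAILED || cq_ring == MAP_FAILED || sqes == MAP_FAILED)
		return -1;
	return fd;
}
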
Jens Axboe90554202020-09-03 12:12:41 -06009160static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
9161{
9162 DEFINE_WAIT(wait);
9163
9164 do {
9165 if (!io_sqring_full(ctx))
9166 break;
9167
9168 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9169
9170 if (!io_sqring_full(ctx))
9171 break;
9172
9173 schedule();
9174 } while (!signal_pending(current));
9175
9176 finish_wait(&ctx->sqo_sq_wait, &wait);
9177}
9178
Jens Axboe2b188cc2019-01-07 10:46:33 -07009179SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
9180 u32, min_complete, u32, flags, const sigset_t __user *, sig,
9181 size_t, sigsz)
9182{
9183 struct io_ring_ctx *ctx;
9184 long ret = -EBADF;
9185 int submitted = 0;
9186 struct fd f;
9187
Jens Axboe4c6e2772020-07-01 11:29:10 -06009188 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009189
Jens Axboe90554202020-09-03 12:12:41 -06009190 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9191 IORING_ENTER_SQ_WAIT))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009192 return -EINVAL;
9193
9194 f = fdget(fd);
9195 if (!f.file)
9196 return -EBADF;
9197
9198 ret = -EOPNOTSUPP;
9199 if (f.file->f_op != &io_uring_fops)
9200 goto out_fput;
9201
9202 ret = -ENXIO;
9203 ctx = f.file->private_data;
9204 if (!percpu_ref_tryget(&ctx->refs))
9205 goto out_fput;
9206
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009207 ret = -EBADFD;
9208 if (ctx->flags & IORING_SETUP_R_DISABLED)
9209 goto out;
9210
Jens Axboe6c271ce2019-01-10 11:22:30 -07009211 /*
9212 * For SQ polling, the thread will do all submissions and completions.
9213 * Just return the requested submit count, and wake the thread if
9214 * we were asked to.
9215 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009216 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009217 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboec1edbf52019-11-10 16:56:04 -07009218 if (!list_empty_careful(&ctx->cq_overflow_list))
Jens Axboee6c8aa92020-09-28 13:10:13 -06009219 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Jens Axboe6c271ce2019-01-10 11:22:30 -07009220 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009221 wake_up(&ctx->sq_data->wait);
Jens Axboe90554202020-09-03 12:12:41 -06009222 if (flags & IORING_ENTER_SQ_WAIT)
9223 io_sqpoll_wait_sq(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07009224 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009225 } else if (to_submit) {
Jens Axboefdaf0832020-10-30 09:37:30 -06009226 ret = io_uring_add_task_file(ctx, f.file);
Jens Axboe0f212202020-09-13 13:09:39 -06009227 if (unlikely(ret))
9228 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009229 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009230 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009231 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009232
9233 if (submitted != to_submit)
9234 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009235 }
9236 if (flags & IORING_ENTER_GETEVENTS) {
9237 min_complete = min(min_complete, ctx->cq_entries);
9238
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009239 /*
9240		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace
9241		 * applications don't need to poll for completion events themselves;
9242		 * they can rely on io_sq_thread to do that polling, which reduces
9243		 * cpu usage and uring_lock contention.
9244 */
9245 if (ctx->flags & IORING_SETUP_IOPOLL &&
9246 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009247 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009248 } else {
9249 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
9250 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009251 }
9252
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009253out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009254 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009255out_fput:
9256 fdput(f);
9257 return submitted ? submitted : ret;
9258}
9259
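An illustrative userspace sketch (not part of this file) of the two common io_uring_enter() patterns handled above, assuming __NR_io_uring_enter is available: a plain submit-and-wait for rings without SQPOLL, and a conditional wakeup for IORING_SETUP_SQPOLL rings where the kernel thread otherwise consumes the SQ on its own. The sq_flags pointer is assumed to come from the mapped SQ ring at sq_off.flags.

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Submit 'to_submit' sqes and wait for 'wait_nr' completions in one call */
static int submit_and_wait(int ring_fd, unsigned to_submit, unsigned wait_nr)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, wait_nr,
		       wait_nr ? IORING_ENTER_GETEVENTS : 0, NULL, 0);
}

/* With SQPOLL, only enter the kernel if the poll thread went to sleep.
 * The SQ tail store must be ordered before this flags load; see the
 * ordering notes at the top of this file.
 */
static void sqpoll_submit(int ring_fd, const unsigned *sq_flags)
{
	if (*(volatile const unsigned *)sq_flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
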
Tobias Klauserbebdb652020-02-26 18:38:32 +01009260#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009261static int io_uring_show_cred(int id, void *p, void *data)
9262{
Jens Axboe6b47ab82020-11-05 09:50:16 -07009263 struct io_identity *iod = p;
9264 const struct cred *cred = iod->creds;
Jens Axboe87ce9552020-01-30 08:25:34 -07009265 struct seq_file *m = data;
9266 struct user_namespace *uns = seq_user_ns(m);
9267 struct group_info *gi;
9268 kernel_cap_t cap;
9269 unsigned __capi;
9270 int g;
9271
9272 seq_printf(m, "%5d\n", id);
9273 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9274 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9275 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9276 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9277 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9278 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9279 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9280 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9281 seq_puts(m, "\n\tGroups:\t");
9282 gi = cred->group_info;
9283 for (g = 0; g < gi->ngroups; g++) {
9284 seq_put_decimal_ull(m, g ? " " : "",
9285 from_kgid_munged(uns, gi->gid[g]));
9286 }
9287 seq_puts(m, "\n\tCapEff:\t");
9288 cap = cred->cap_effective;
9289 CAP_FOR_EACH_U32(__capi)
9290 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9291 seq_putc(m, '\n');
9292 return 0;
9293}
9294
9295static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9296{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009297 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009298 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009299 int i;
9300
Jens Axboefad8e0d2020-09-28 08:57:48 -06009301 /*
9302 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9303	 * since the fdinfo case grabs them in the opposite order from normal
9304	 * use. If we fail to get the lock, we just don't iterate any
9305 * structures that could be going away outside the io_uring mutex.
9306 */
9307 has_lock = mutex_trylock(&ctx->uring_lock);
9308
Joseph Qidbbe9c62020-09-29 09:01:22 -06009309 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
9310 sq = ctx->sq_data;
9311
9312 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9313 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009314 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009315 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009316 struct fixed_file_table *table;
9317 struct file *f;
9318
9319 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
9320 f = table->files[i & IORING_FILE_TABLE_MASK];
9321 if (f)
9322 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9323 else
9324 seq_printf(m, "%5u: <none>\n", i);
9325 }
9326 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009327 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009328 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9329
9330 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9331 (unsigned int) buf->len);
9332 }
Jens Axboefad8e0d2020-09-28 08:57:48 -06009333 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009334 seq_printf(m, "Personalities:\n");
9335 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9336 }
Jens Axboed7718a92020-02-14 22:23:12 -07009337 seq_printf(m, "PollList:\n");
9338 spin_lock_irq(&ctx->completion_lock);
9339 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9340 struct hlist_head *list = &ctx->cancel_hash[i];
9341 struct io_kiocb *req;
9342
9343 hlist_for_each_entry(req, list, hash_node)
9344 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9345 req->task->task_works != NULL);
9346 }
9347 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009348 if (has_lock)
9349 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009350}
9351
9352static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9353{
9354 struct io_ring_ctx *ctx = f->private_data;
9355
9356 if (percpu_ref_tryget(&ctx->refs)) {
9357 __io_uring_show_fdinfo(ctx, m);
9358 percpu_ref_put(&ctx->refs);
9359 }
9360}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009361#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009362
Jens Axboe2b188cc2019-01-07 10:46:33 -07009363static const struct file_operations io_uring_fops = {
9364 .release = io_uring_release,
Jens Axboefcb323c2019-10-24 12:39:47 -06009365 .flush = io_uring_flush,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009366 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009367#ifndef CONFIG_MMU
9368 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9369 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9370#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009371 .poll = io_uring_poll,
9372 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009373#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009374 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009375#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009376};
9377
9378static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9379 struct io_uring_params *p)
9380{
Hristo Venev75b28af2019-08-26 17:23:46 +00009381 struct io_rings *rings;
9382 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009383
Jens Axboebd740482020-08-05 12:58:23 -06009384 /* make sure these are sane, as we already accounted them */
9385 ctx->sq_entries = p->sq_entries;
9386 ctx->cq_entries = p->cq_entries;
9387
Hristo Venev75b28af2019-08-26 17:23:46 +00009388 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9389 if (size == SIZE_MAX)
9390 return -EOVERFLOW;
9391
9392 rings = io_mem_alloc(size);
9393 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009394 return -ENOMEM;
9395
Hristo Venev75b28af2019-08-26 17:23:46 +00009396 ctx->rings = rings;
9397 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9398 rings->sq_ring_mask = p->sq_entries - 1;
9399 rings->cq_ring_mask = p->cq_entries - 1;
9400 rings->sq_ring_entries = p->sq_entries;
9401 rings->cq_ring_entries = p->cq_entries;
9402 ctx->sq_mask = rings->sq_ring_mask;
9403 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009404
9405 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009406 if (size == SIZE_MAX) {
9407 io_mem_free(ctx->rings);
9408 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009409 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009410 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009411
9412 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009413 if (!ctx->sq_sqes) {
9414 io_mem_free(ctx->rings);
9415 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009416 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009417 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009418
Jens Axboe2b188cc2019-01-07 10:46:33 -07009419 return 0;
9420}
9421
9422/*
9423 * Allocate an anonymous fd; this is what constitutes the application
9424 * visible backing of an io_uring instance. The application mmaps this
9425 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9426 * we have to tie this fd to a socket for file garbage collection purposes.
9427 */
9428static int io_uring_get_fd(struct io_ring_ctx *ctx)
9429{
9430 struct file *file;
9431 int ret;
9432
9433#if defined(CONFIG_UNIX)
9434 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9435 &ctx->ring_sock);
9436 if (ret)
9437 return ret;
9438#endif
9439
9440 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9441 if (ret < 0)
9442 goto err;
9443
9444 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9445 O_RDWR | O_CLOEXEC);
9446 if (IS_ERR(file)) {
Jens Axboe0f212202020-09-13 13:09:39 -06009447err_fd:
Jens Axboe2b188cc2019-01-07 10:46:33 -07009448 put_unused_fd(ret);
9449 ret = PTR_ERR(file);
9450 goto err;
9451 }
9452
9453#if defined(CONFIG_UNIX)
9454 ctx->ring_sock->file = file;
9455#endif
Jens Axboefdaf0832020-10-30 09:37:30 -06009456 if (unlikely(io_uring_add_task_file(ctx, file))) {
Jens Axboe0f212202020-09-13 13:09:39 -06009457 file = ERR_PTR(-ENOMEM);
9458 goto err_fd;
9459 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009460 fd_install(ret, file);
9461 return ret;
9462err:
9463#if defined(CONFIG_UNIX)
9464 sock_release(ctx->ring_sock);
9465 ctx->ring_sock = NULL;
9466#endif
9467 return ret;
9468}
9469
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009470static int io_uring_create(unsigned entries, struct io_uring_params *p,
9471 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009472{
9473 struct user_struct *user = NULL;
9474 struct io_ring_ctx *ctx;
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009475 bool limit_mem;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009476 int ret;
9477
Jens Axboe8110c1a2019-12-28 15:39:54 -07009478 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009479 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009480 if (entries > IORING_MAX_ENTRIES) {
9481 if (!(p->flags & IORING_SETUP_CLAMP))
9482 return -EINVAL;
9483 entries = IORING_MAX_ENTRIES;
9484 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009485
9486 /*
9487 * Use twice as many entries for the CQ ring. It's possible for the
9488 * application to drive a higher depth than the size of the SQ ring,
9489 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009490 * some flexibility in overcommitting a bit. If the application has
9491 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9492 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009493 */
9494 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009495 if (p->flags & IORING_SETUP_CQSIZE) {
9496 /*
9497 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9498 * to a power-of-two, if it isn't already. We do NOT impose
9499 * any cq vs sq ring sizing.
9500 */
Jens Axboe88ec3212020-11-11 10:38:53 -07009501 p->cq_entries = roundup_pow_of_two(p->cq_entries);
Jens Axboe8110c1a2019-12-28 15:39:54 -07009502 if (p->cq_entries < p->sq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009503 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009504 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9505 if (!(p->flags & IORING_SETUP_CLAMP))
9506 return -EINVAL;
9507 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9508 }
Jens Axboe33a107f2019-10-04 12:10:03 -06009509 } else {
9510 p->cq_entries = 2 * p->sq_entries;
9511 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009512
9513 user = get_uid(current_user());
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009514 limit_mem = !capable(CAP_IPC_LOCK);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009515
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009516 if (limit_mem) {
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009517 ret = __io_account_mem(user,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009518 ring_pages(p->sq_entries, p->cq_entries));
9519 if (ret) {
9520 free_uid(user);
9521 return ret;
9522 }
9523 }
9524
9525 ctx = io_ring_ctx_alloc(p);
9526 if (!ctx) {
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009527 if (limit_mem)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009528 __io_unaccount_mem(user, ring_pages(p->sq_entries,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009529 p->cq_entries));
9530 free_uid(user);
9531 return -ENOMEM;
9532 }
9533 ctx->compat = in_compat_syscall();
Jens Axboe2b188cc2019-01-07 10:46:33 -07009534 ctx->user = user;
Jens Axboe0b8c0ec2019-12-02 08:50:00 -07009535 ctx->creds = get_current_cred();
Jens Axboe4ea33a92020-10-15 13:46:44 -06009536#ifdef CONFIG_AUDIT
9537 ctx->loginuid = current->loginuid;
9538 ctx->sessionid = current->sessionid;
9539#endif
Jens Axboe2aede0e2020-09-14 10:45:53 -06009540 ctx->sqo_task = get_task_struct(current);
9541
9542 /*
9543 * This is just grabbed for accounting purposes. When a process exits,
9544 * the mm is exited and dropped before the files, hence we need to hang
9545 * on to this mm purely for the purposes of being able to unaccount
9546 * memory (locked/pinned vm). It's not used for anything else.
9547 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009548 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009549 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009550
Dennis Zhou91d8f512020-09-16 13:41:05 -07009551#ifdef CONFIG_BLK_CGROUP
9552 /*
9553 * The sq thread will belong to the original cgroup it was inited in.
9554 * If the cgroup goes offline (e.g. disabling the io controller), then
9555 * issued bios will be associated with the closest cgroup later in the
9556 * block layer.
9557 */
9558 rcu_read_lock();
9559 ctx->sqo_blkcg_css = blkcg_css();
9560 ret = css_tryget_online(ctx->sqo_blkcg_css);
9561 rcu_read_unlock();
9562 if (!ret) {
9563 /* don't init against a dying cgroup, have the user try again */
9564 ctx->sqo_blkcg_css = NULL;
9565 ret = -ENODEV;
9566 goto err;
9567 }
9568#endif
Jens Axboe6c271ce2019-01-10 11:22:30 -07009569
Jens Axboe2b188cc2019-01-07 10:46:33 -07009570 /*
9571 * Account memory _before_ installing the file descriptor. Once
9572 * the descriptor is installed, it can get closed at any time. Also
Jens Axboe2b188cc2019-01-07 10:46:33 -07009573 * do this before hitting the general error path, as ring freeing
Hristo Venev75b28af2019-08-26 17:23:46 +00009574 * will un-account as well.
9575 */
9576 io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
9577 ACCT_LOCKED);
9578 ctx->limit_mem = limit_mem;
9579
9580 ret = io_allocate_scq_urings(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009581 if (ret)
9582 goto err;
Hristo Venev75b28af2019-08-26 17:23:46 +00009583
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009584 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009585 if (ret)
9586 goto err;
9587
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009588 if (!(p->flags & IORING_SETUP_R_DISABLED))
9589 io_sq_offload_start(ctx);
9590
Jens Axboe2b188cc2019-01-07 10:46:33 -07009591 memset(&p->sq_off, 0, sizeof(p->sq_off));
9592 p->sq_off.head = offsetof(struct io_rings, sq.head);
9593 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9594 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9595 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9596 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9597 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9598 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
9599
9600 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009601 p->cq_off.head = offsetof(struct io_rings, cq.head);
9602 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9603 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9604 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9605 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9606 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009607 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009608
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009609 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9610 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009611 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Jens Axboe28cea78a2020-09-14 10:51:17 -06009612 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009613
9614 if (copy_to_user(params, p, sizeof(*p))) {
9615 ret = -EFAULT;
9616 goto err;
9617 }
Jens Axboed1719f72020-07-30 13:43:53 -06009618
9619 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009620	 * Install the ring fd as the very last thing, so we don't risk someone
9621	 * having closed it before we finish setup.
9622 */
9623 ret = io_uring_get_fd(ctx);
9624 if (ret < 0)
9625 goto err;
9626
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009627 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009628 return ret;
9629err:
9630 io_ring_ctx_wait_and_kill(ctx);
9631 return ret;
9632}
9633
9634/*
9635 * Sets up an io_uring context and returns the fd. The application asks for a
9636 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9637 * params structure passed in.
9638 */
9639static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9640{
9641 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009642 int i;
9643
9644 if (copy_from_user(&p, params, sizeof(p)))
9645 return -EFAULT;
9646 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9647 if (p.resv[i])
9648 return -EINVAL;
9649 }
9650
Jens Axboe6c271ce2019-01-10 11:22:30 -07009651 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009652 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009653 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9654 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009655 return -EINVAL;
9656
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009657 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009658}
9659
9660SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9661 struct io_uring_params __user *, params)
9662{
9663 return io_uring_setup(entries, params);
9664}
9665
Jens Axboe66f4af92020-01-16 15:36:52 -07009666static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9667{
9668 struct io_uring_probe *p;
9669 size_t size;
9670 int i, ret;
9671
9672 size = struct_size(p, ops, nr_args);
9673 if (size == SIZE_MAX)
9674 return -EOVERFLOW;
9675 p = kzalloc(size, GFP_KERNEL);
9676 if (!p)
9677 return -ENOMEM;
9678
9679 ret = -EFAULT;
9680 if (copy_from_user(p, arg, size))
9681 goto out;
9682 ret = -EINVAL;
9683 if (memchr_inv(p, 0, size))
9684 goto out;
9685
9686 p->last_op = IORING_OP_LAST - 1;
9687 if (nr_args > IORING_OP_LAST)
9688 nr_args = IORING_OP_LAST;
9689
9690 for (i = 0; i < nr_args; i++) {
9691 p->ops[i].op = i;
9692 if (!io_op_defs[i].not_supported)
9693 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9694 }
9695 p->ops_len = i;
9696
9697 ret = 0;
9698 if (copy_to_user(arg, p, size))
9699 ret = -EFAULT;
9700out:
9701 kfree(p);
9702 return ret;
9703}
9704
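A hedged userspace sketch (not part of this file) of consuming the probe interface implemented above: allocate a zeroed io_uring_probe large enough for IORING_OP_LAST ops, register it, and scan ops[] for IO_URING_OP_SUPPORTED. It assumes <linux/io_uring.h> and __NR_io_uring_register; error handling is minimal.

#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns 1 if 'opcode' is supported by the running kernel, 0 if not, -1 on error */
static int opcode_supported(int ring_fd, int opcode)
{
	size_t len = sizeof(struct io_uring_probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *p;
	int i, ret = -1;

	p = calloc(1, len);	/* must be zeroed, the kernel rejects stray bits */
	if (!p)
		return -1;
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		    p, IORING_OP_LAST) < 0)
		goto out;
	ret = 0;
	for (i = 0; i < p->ops_len; i++) {
		if (p->ops[i].op == opcode) {
			ret = !!(p->ops[i].flags & IO_URING_OP_SUPPORTED);
			break;
		}
	}
out:
	free(p);
	return ret;
}
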
Jens Axboe071698e2020-01-28 10:04:42 -07009705static int io_register_personality(struct io_ring_ctx *ctx)
9706{
Jens Axboe1e6fa522020-10-15 08:46:24 -06009707 struct io_identity *id;
9708 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009709
Jens Axboe1e6fa522020-10-15 08:46:24 -06009710 id = kmalloc(sizeof(*id), GFP_KERNEL);
9711 if (unlikely(!id))
9712 return -ENOMEM;
9713
9714 io_init_identity(id);
9715 id->creds = get_current_cred();
9716
9717 ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
9718 if (ret < 0) {
9719 put_cred(id->creds);
9720 kfree(id);
9721 }
9722 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009723}
9724
9725static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
9726{
Jens Axboe1e6fa522020-10-15 08:46:24 -06009727 struct io_identity *iod;
Jens Axboe071698e2020-01-28 10:04:42 -07009728
Jens Axboe1e6fa522020-10-15 08:46:24 -06009729 iod = idr_remove(&ctx->personality_idr, id);
9730 if (iod) {
9731 put_cred(iod->creds);
9732 if (refcount_dec_and_test(&iod->count))
9733 kfree(iod);
Jens Axboe071698e2020-01-28 10:04:42 -07009734 return 0;
9735 }
9736
9737 return -EINVAL;
9738}
9739
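A short userspace sketch (not part of this file) of the personality registration above: the returned id can be stored in sqe->personality so that the sqe is issued with the credentials captured at registration time. Assumes __NR_io_uring_register from <sys/syscall.h>.

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Snapshot the caller's current creds; returns an id for sqe->personality */
static int register_personality(int ring_fd)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}

/* Drop a previously registered personality id */
static int unregister_personality(int ring_fd, unsigned id)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_UNREGISTER_PERSONALITY, NULL, id);
}
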
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009740static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9741 unsigned int nr_args)
9742{
9743 struct io_uring_restriction *res;
9744 size_t size;
9745 int i, ret;
9746
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009747 /* Restrictions allowed only if rings started disabled */
9748 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9749 return -EBADFD;
9750
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009751 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009752 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009753 return -EBUSY;
9754
9755 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9756 return -EINVAL;
9757
9758 size = array_size(nr_args, sizeof(*res));
9759 if (size == SIZE_MAX)
9760 return -EOVERFLOW;
9761
9762 res = memdup_user(arg, size);
9763 if (IS_ERR(res))
9764 return PTR_ERR(res);
9765
9766 ret = 0;
9767
9768 for (i = 0; i < nr_args; i++) {
9769 switch (res[i].opcode) {
9770 case IORING_RESTRICTION_REGISTER_OP:
9771 if (res[i].register_op >= IORING_REGISTER_LAST) {
9772 ret = -EINVAL;
9773 goto out;
9774 }
9775
9776 __set_bit(res[i].register_op,
9777 ctx->restrictions.register_op);
9778 break;
9779 case IORING_RESTRICTION_SQE_OP:
9780 if (res[i].sqe_op >= IORING_OP_LAST) {
9781 ret = -EINVAL;
9782 goto out;
9783 }
9784
9785 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9786 break;
9787 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9788 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9789 break;
9790 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9791 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9792 break;
9793 default:
9794 ret = -EINVAL;
9795 goto out;
9796 }
9797 }
9798
9799out:
9800 /* Reset all restrictions if an error happened */
9801 if (ret != 0)
9802 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9803 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009804 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009805
9806 kfree(res);
9807 return ret;
9808}
9809
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009810static int io_register_enable_rings(struct io_ring_ctx *ctx)
9811{
9812 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9813 return -EBADFD;
9814
9815 if (ctx->restrictions.registered)
9816 ctx->restricted = 1;
9817
9818 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9819
9820 io_sq_offload_start(ctx);
9821
9822 return 0;
9823}
9824
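A hedged userspace sketch (not part of this file) tying the two registration opcodes above together: create the ring with IORING_SETUP_R_DISABLED, register a whitelist of allowed sqe and register opcodes while it is still disabled, then enable it with IORING_REGISTER_ENABLE_RINGS. Assumes <linux/io_uring.h> plus __NR_io_uring_setup and __NR_io_uring_register; error handling is trimmed.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_restricted_ring(unsigned entries)
{
	struct io_uring_restriction res[2];
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_R_DISABLED;	/* ring starts disabled */
	fd = syscall(__NR_io_uring_setup, entries, &p);
	if (fd < 0)
		return -1;

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;	/* only allow readv sqes */
	res[0].sqe_op = IORING_OP_READV;
	res[1].opcode = IORING_RESTRICTION_REGISTER_OP;	/* only allow buffer registration */
	res[1].register_op = IORING_REGISTER_BUFFERS;

	if (syscall(__NR_io_uring_register, fd, IORING_REGISTER_RESTRICTIONS,
		    res, 2) < 0)
		return -1;
	if (syscall(__NR_io_uring_register, fd, IORING_REGISTER_ENABLE_RINGS,
		    NULL, 0) < 0)
		return -1;
	return fd;
}
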
Jens Axboe071698e2020-01-28 10:04:42 -07009825static bool io_register_op_must_quiesce(int op)
9826{
9827 switch (op) {
9828 case IORING_UNREGISTER_FILES:
9829 case IORING_REGISTER_FILES_UPDATE:
9830 case IORING_REGISTER_PROBE:
9831 case IORING_REGISTER_PERSONALITY:
9832 case IORING_UNREGISTER_PERSONALITY:
9833 return false;
9834 default:
9835 return true;
9836 }
9837}
9838
Jens Axboeedafcce2019-01-09 09:16:05 -07009839static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9840 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06009841 __releases(ctx->uring_lock)
9842 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07009843{
9844 int ret;
9845
Jens Axboe35fa71a2019-04-22 10:23:23 -06009846 /*
9847	 * We're inside the ring mutex; if the ref is already dying, then
9848 * someone else killed the ctx or is already going through
9849 * io_uring_register().
9850 */
9851 if (percpu_ref_is_dying(&ctx->refs))
9852 return -ENXIO;
9853
Jens Axboe071698e2020-01-28 10:04:42 -07009854 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009855 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06009856
Jens Axboe05f3fb32019-12-09 11:22:50 -07009857 /*
9858 * Drop uring mutex before waiting for references to exit. If
9859 * another thread is currently inside io_uring_enter() it might
9860 * need to grab the uring_lock to make progress. If we hold it
9861 * here across the drain wait, then we can deadlock. It's safe
9862 * to drop the mutex here, since no new references will come in
9863 * after we've killed the percpu ref.
9864 */
9865 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009866 do {
9867 ret = wait_for_completion_interruptible(&ctx->ref_comp);
9868 if (!ret)
9869 break;
Jens Axboeed6930c2020-10-08 19:09:46 -06009870 ret = io_run_task_work_sig();
9871 if (ret < 0)
9872 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009873 } while (1);
9874
Jens Axboe05f3fb32019-12-09 11:22:50 -07009875 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009876
Jens Axboec1503682020-01-08 08:26:07 -07009877 if (ret) {
9878 percpu_ref_resurrect(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009879 goto out_quiesce;
9880 }
9881 }
9882
9883 if (ctx->restricted) {
9884 if (opcode >= IORING_REGISTER_LAST) {
9885 ret = -EINVAL;
9886 goto out;
9887 }
9888
9889 if (!test_bit(opcode, ctx->restrictions.register_op)) {
9890 ret = -EACCES;
Jens Axboec1503682020-01-08 08:26:07 -07009891 goto out;
9892 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07009893 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009894
9895 switch (opcode) {
9896 case IORING_REGISTER_BUFFERS:
9897 ret = io_sqe_buffer_register(ctx, arg, nr_args);
9898 break;
9899 case IORING_UNREGISTER_BUFFERS:
9900 ret = -EINVAL;
9901 if (arg || nr_args)
9902 break;
9903 ret = io_sqe_buffer_unregister(ctx);
9904 break;
Jens Axboe6b063142019-01-10 22:13:58 -07009905 case IORING_REGISTER_FILES:
9906 ret = io_sqe_files_register(ctx, arg, nr_args);
9907 break;
9908 case IORING_UNREGISTER_FILES:
9909 ret = -EINVAL;
9910 if (arg || nr_args)
9911 break;
9912 ret = io_sqe_files_unregister(ctx);
9913 break;
Jens Axboec3a31e62019-10-03 13:59:56 -06009914 case IORING_REGISTER_FILES_UPDATE:
9915 ret = io_sqe_files_update(ctx, arg, nr_args);
9916 break;
Jens Axboe9b402842019-04-11 11:45:41 -06009917 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -07009918 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -06009919 ret = -EINVAL;
9920 if (nr_args != 1)
9921 break;
9922 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -07009923 if (ret)
9924 break;
9925 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9926 ctx->eventfd_async = 1;
9927 else
9928 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -06009929 break;
9930 case IORING_UNREGISTER_EVENTFD:
9931 ret = -EINVAL;
9932 if (arg || nr_args)
9933 break;
9934 ret = io_eventfd_unregister(ctx);
9935 break;
Jens Axboe66f4af92020-01-16 15:36:52 -07009936 case IORING_REGISTER_PROBE:
9937 ret = -EINVAL;
9938 if (!arg || nr_args > 256)
9939 break;
9940 ret = io_probe(ctx, arg, nr_args);
9941 break;
Jens Axboe071698e2020-01-28 10:04:42 -07009942 case IORING_REGISTER_PERSONALITY:
9943 ret = -EINVAL;
9944 if (arg || nr_args)
9945 break;
9946 ret = io_register_personality(ctx);
9947 break;
9948 case IORING_UNREGISTER_PERSONALITY:
9949 ret = -EINVAL;
9950 if (arg)
9951 break;
9952 ret = io_unregister_personality(ctx, nr_args);
9953 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009954 case IORING_REGISTER_ENABLE_RINGS:
9955 ret = -EINVAL;
9956 if (arg || nr_args)
9957 break;
9958 ret = io_register_enable_rings(ctx);
9959 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009960 case IORING_REGISTER_RESTRICTIONS:
9961 ret = io_register_restrictions(ctx, arg, nr_args);
9962 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009963 default:
9964 ret = -EINVAL;
9965 break;
9966 }
9967
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009968out:
Jens Axboe071698e2020-01-28 10:04:42 -07009969 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009970 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -07009971 percpu_ref_reinit(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009972out_quiesce:
Jens Axboe0f158b42020-05-14 17:18:39 -06009973 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -07009974 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009975 return ret;
9976}
9977
9978SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9979 void __user *, arg, unsigned int, nr_args)
9980{
9981 struct io_ring_ctx *ctx;
9982 long ret = -EBADF;
9983 struct fd f;
9984
9985 f = fdget(fd);
9986 if (!f.file)
9987 return -EBADF;
9988
9989 ret = -EOPNOTSUPP;
9990 if (f.file->f_op != &io_uring_fops)
9991 goto out_fput;
9992
9993 ctx = f.file->private_data;
9994
9995 mutex_lock(&ctx->uring_lock);
9996 ret = __io_uring_register(ctx, opcode, arg, nr_args);
9997 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009998 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9999 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010000out_fput:
10001 fdput(f);
10002 return ret;
10003}
10004
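One more userspace sketch (not part of this file) for the eventfd opcodes dispatched in __io_uring_register() above: pass a pointer to the eventfd descriptor with nr_args set to 1. IORING_REGISTER_EVENTFD_ASYNC is registered the same way and is intended to signal only for requests that complete out of line. Assumes <sys/eventfd.h> and __NR_io_uring_register.

#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

/* After this, every CQE posted to the ring also signals 'efd' */
static int attach_eventfd(int ring_fd, int efd)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_EVENTFD, &efd, 1);
}
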
Jens Axboe2b188cc2019-01-07 10:46:33 -070010005static int __init io_uring_init(void)
10006{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010007#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10008 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10009 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10010} while (0)
10011
10012#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10013 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10014 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10015 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10016 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10017 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10018 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10019 BUILD_BUG_SQE_ELEM(8, __u64, off);
10020 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10021 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010022 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010023 BUILD_BUG_SQE_ELEM(24, __u32, len);
10024 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10025 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10026 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10027 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010028 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10029 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010030 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10031 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10032 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10033 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10034 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10035 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10036 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10037 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010038 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010039 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10040 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
10041 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010042 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010043
Jens Axboed3656342019-12-18 09:50:26 -070010044 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -070010045 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Jens Axboe2b188cc2019-01-07 10:46:33 -070010046 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
10047 return 0;
10048};
10049__initcall(io_uring_init);