// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
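
/*
 * For illustration only: a minimal userspace CQ-reaping loop that follows the
 * ordering rules above might look like the sketch below. The names cq_head,
 * cq_tail, cq_mask and cqes stand for the application's own pointers into the
 * mmap'ed CQ ring (published via struct io_cqring_offsets), and handle_cqe()
 * is a placeholder for application code; none of them are symbols defined in
 * this file.
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);	// pairs with the kernel's tail store
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);	// orders the entry loads before the head store
 *
 * See liburing for a complete, supported implementation of this protocol.
 */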
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
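/*
 * With the shift of 9 above, one table spans 512 file pointers, i.e.
 * 512 * sizeof(struct file *) == 4096 bytes == one page on 64-bit, and the
 * 64 tables cap the fixed file set at 64 * 512 == 32768 entries.
 */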
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
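
/*
 * Illustrative example of the masking described above: with
 * sq_ring_entries == 8 the mask is 7, so a free-running tail of 9 selects
 * slot (9 & 7) == 1. Head and tail are never wrapped by either side; only
 * their masked values are used to index the rings.
 */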

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct io_ring_ctx;

struct io_rsrc_put {
	struct list_head list;
	union {
		void *rsrc;
		struct file *file;
	};
};

struct fixed_rsrc_table {
	struct file		**files;
};

struct fixed_rsrc_ref_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct fixed_rsrc_data		*rsrc_data;
	void				(*rsrc_put)(struct io_ring_ctx *ctx,
						    struct io_rsrc_put *prsrc);
	struct llist_node		llist;
	bool				done;
};

struct fixed_rsrc_data {
	struct fixed_rsrc_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_sq_data {
	refcount_t		refs;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;
	struct list_head	ctx_new_list;
	struct mutex		ctx_lock;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32

struct io_comp_state {
	unsigned int		nr;
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
};

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		limit_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		sqo_dead: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;

	/*
	 * For SQPOLL usage - we hold a reference to the parent task, so we
	 * have access to the ->files
	 */
	struct task_struct	*sqo_task;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state	*sqo_blkcg_css;
#endif

	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_rsrc_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

#ifdef CONFIG_AUDIT
	kuid_t			loginuid;
	unsigned int		sessionid;
#endif

	struct completion	ref_comp;
	struct completion	sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct work_struct		exit_work;
	struct io_restriction		restrictions;
	struct io_submit_state		submit_state;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	int				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_WORK_INITIALIZED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;
	struct callback_head		task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
	unsigned		work_flags;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
						IO_WQ_WORK_MM,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
		.work_flags		= IO_WQ_WORK_MM,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_OPENAT] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FS | IO_WQ_WORK_MM,
	},
	[IORING_OP_CLOSE] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_FILES_UPDATE] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
	},
	[IORING_OP_STATX] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_MADVISE] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_OPENAT2] = {
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
						IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.work_flags		= IO_WQ_WORK_FILES,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.work_flags		= IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
	[IORING_OP_UNLINKAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
};

enum io_mem_account {
	ACCT_LOCKED,
	ACCT_PINNED,
};

static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files);
static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx);
static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
				     struct fixed_rsrc_ref_node *ref_node);

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     unsigned int issue_flags);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update *ip,
				 unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
static void io_rsrc_put_work(struct work_struct *work);

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);
static void io_req_task_queue(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
		__io_clean_op(req);
}

static inline void io_set_resource_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->file_data->node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static bool io_match_task(struct io_kiocb *head,
			  struct task_struct *task,
			  struct files_struct *files)
{
	struct io_kiocb *req;

	if (task && head->task != task) {
		/* in terms of cancelation, always match if req task is dead */
		if (head->task->flags & PF_EXITING)
			return true;
		return false;
	}
	if (!files)
		return true;

	io_for_each_link(req, head) {
		if (!(req->flags & REQ_F_WORK_INITIALIZED))
			continue;
		if (req->file && req->file->f_op == &io_uring_fops)
			return true;
		if ((req->work.flags & IO_WQ_WORK_FILES) &&
		    req->work.identity->files == files)
			return true;
	}
	return false;
}

static void io_sq_thread_drop_mm_files(void)
{
	struct files_struct *files = current->files;
	struct mm_struct *mm = current->mm;

	if (mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
		current->mm = NULL;
	}
	if (files) {
		struct nsproxy *nsproxy = current->nsproxy;

		task_lock(current);
		current->files = NULL;
		current->nsproxy = NULL;
		task_unlock(current);
		put_files_struct(files);
		put_nsproxy(nsproxy);
	}
}

static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
{
	if (current->flags & PF_EXITING)
		return -EFAULT;

	if (!current->files) {
		struct files_struct *files;
		struct nsproxy *nsproxy;

		task_lock(ctx->sqo_task);
		files = ctx->sqo_task->files;
		if (!files) {
			task_unlock(ctx->sqo_task);
			return -EOWNERDEAD;
		}
		atomic_inc(&files->count);
		get_nsproxy(ctx->sqo_task->nsproxy);
		nsproxy = ctx->sqo_task->nsproxy;
		task_unlock(ctx->sqo_task);

		task_lock(current);
		current->files = files;
		current->nsproxy = nsproxy;
		task_unlock(current);
	}
	return 0;
}

static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
	struct mm_struct *mm;

	if (current->flags & PF_EXITING)
		return -EFAULT;
	if (current->mm)
		return 0;

	/* Should never happen */
	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
		return -EFAULT;

	task_lock(ctx->sqo_task);
	mm = ctx->sqo_task->mm;
	if (unlikely(!mm || !mmget_not_zero(mm)))
		mm = NULL;
	task_unlock(ctx->sqo_task);

	if (mm) {
		kthread_use_mm(mm);
		return 0;
	}

	return -EFAULT;
}

static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
					 struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	int ret;

	if (def->work_flags & IO_WQ_WORK_MM) {
		ret = __io_sq_thread_acquire_mm(ctx);
		if (unlikely(ret))
			return ret;
	}

	if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES)) {
		ret = __io_sq_thread_acquire_files(ctx);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
1207
Dennis Zhou91d8f512020-09-16 13:41:05 -07001208static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
1209 struct cgroup_subsys_state **cur_css)
1210
1211{
1212#ifdef CONFIG_BLK_CGROUP
1213 /* puts the old one when swapping */
1214 if (*cur_css != ctx->sqo_blkcg_css) {
1215 kthread_associate_blkcg(ctx->sqo_blkcg_css);
1216 *cur_css = ctx->sqo_blkcg_css;
1217 }
1218#endif
1219}
1220
1221static void io_sq_thread_unassociate_blkcg(void)
1222{
1223#ifdef CONFIG_BLK_CGROUP
1224 kthread_associate_blkcg(NULL);
1225#endif
1226}
1227
Jens Axboec40f6372020-06-25 15:39:59 -06001228static inline void req_set_fail_links(struct io_kiocb *req)
1229{
1230 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1231 req->flags |= REQ_F_FAIL_LINK;
1232}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001233
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001234/*
Jens Axboe1e6fa522020-10-15 08:46:24 -06001235 * None of these are dereferenced; they are simply used to check if any of
1236 * them have changed. If we're running under current and they are still the
1237 * same, we're fine to grab references to them for actual out-of-line use.
1238 */
1239static void io_init_identity(struct io_identity *id)
1240{
1241 id->files = current->files;
1242 id->mm = current->mm;
1243#ifdef CONFIG_BLK_CGROUP
1244 rcu_read_lock();
1245 id->blkcg_css = blkcg_css();
1246 rcu_read_unlock();
1247#endif
1248 id->creds = current_cred();
1249 id->nsproxy = current->nsproxy;
1250 id->fs = current->fs;
1251 id->fsize = rlimit(RLIMIT_FSIZE);
Jens Axboe4ea33a92020-10-15 13:46:44 -06001252#ifdef CONFIG_AUDIT
1253 id->loginuid = current->loginuid;
1254 id->sessionid = current->sessionid;
1255#endif
Jens Axboe1e6fa522020-10-15 08:46:24 -06001256 refcount_set(&id->count, 1);
1257}
1258
Pavel Begunkovec99ca62020-10-18 10:17:38 +01001259static inline void __io_req_init_async(struct io_kiocb *req)
1260{
1261 memset(&req->work, 0, sizeof(req->work));
1262 req->flags |= REQ_F_WORK_INITIALIZED;
1263}
1264
Jens Axboe1e6fa522020-10-15 08:46:24 -06001265/*
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001266 * Note: must call io_req_init_async() before the first time you
1267 * touch any members of io_wq_work.
1268 */
1269static inline void io_req_init_async(struct io_kiocb *req)
1270{
Jens Axboe500a3732020-10-15 17:38:03 -06001271 struct io_uring_task *tctx = current->io_uring;
1272
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001273 if (req->flags & REQ_F_WORK_INITIALIZED)
1274 return;
1275
Pavel Begunkovec99ca62020-10-18 10:17:38 +01001276 __io_req_init_async(req);
Jens Axboe500a3732020-10-15 17:38:03 -06001277
1278 /* Grab a ref if this isn't our static identity */
1279 req->work.identity = tctx->identity;
1280 if (tctx->identity != &tctx->__identity)
1281 refcount_inc(&req->work.identity->count);
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001282}
1283
Jens Axboe2b188cc2019-01-07 10:46:33 -07001284static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1285{
1286 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1287
Jens Axboe0f158b42020-05-14 17:18:39 -06001288 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001289}
1290
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001291static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1292{
1293 return !req->timeout.off;
1294}
1295
Jens Axboe2b188cc2019-01-07 10:46:33 -07001296static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1297{
1298 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001299 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001300
1301 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1302 if (!ctx)
1303 return NULL;
1304
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001305 ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
1306 if (!ctx->fallback_req)
1307 goto err;
1308
Jens Axboe78076bb2019-12-04 19:56:40 -07001309 /*
1310 * Use 5 bits less than the max cq entries; that should give us around
1311 * 32 entries per hash list if totally full and uniformly spread.
1312 */
1313 hash_bits = ilog2(p->cq_entries);
1314 hash_bits -= 5;
1315 if (hash_bits <= 0)
1316 hash_bits = 1;
1317 ctx->cancel_hash_bits = hash_bits;
1318 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1319 GFP_KERNEL);
1320 if (!ctx->cancel_hash)
1321 goto err;
1322 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1323
Roman Gushchin21482892019-05-07 10:01:48 -07001324 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001325 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1326 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001327
1328 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001329 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001330 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001331 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001332 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001333 init_completion(&ctx->ref_comp);
1334 init_completion(&ctx->sq_thread_comp);
Jens Axboe5a2e7452020-02-23 16:23:11 -07001335 idr_init(&ctx->io_buffer_idr);
Jens Axboe071698e2020-01-28 10:04:42 -07001336 idr_init(&ctx->personality_idr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001337 mutex_init(&ctx->uring_lock);
1338 init_waitqueue_head(&ctx->wait);
1339 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001340 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001341 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001342 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -06001343 spin_lock_init(&ctx->inflight_lock);
1344 INIT_LIST_HEAD(&ctx->inflight_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001345 spin_lock_init(&ctx->rsrc_ref_lock);
1346 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001347 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1348 init_llist_head(&ctx->rsrc_put_llist);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001349 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001350err:
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001351 if (ctx->fallback_req)
1352 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe78076bb2019-12-04 19:56:40 -07001353 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001354 kfree(ctx);
1355 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001356}
1357
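/*
 * Drained (REQ_F_IO_DRAIN) requests record the submission sequence they must
 * wait for; they stay deferred until the CQ tail plus the overflow count has
 * caught up with that sequence, i.e. until every earlier request completed.
 */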
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001358static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001359{
Jens Axboe2bc99302020-07-09 09:43:27 -06001360 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1361 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001362
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001363 return seq != ctx->cached_cq_tail
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001364 + READ_ONCE(ctx->cached_cq_overflow);
Jens Axboe2bc99302020-07-09 09:43:27 -06001365 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001366
Bob Liu9d858b22019-11-13 18:06:25 +08001367 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001368}
1369
Jens Axboe5c3462c2020-10-15 09:02:33 -06001370static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
Jens Axboe1e6fa522020-10-15 08:46:24 -06001371{
Jens Axboe500a3732020-10-15 17:38:03 -06001372 if (req->work.identity == &tctx->__identity)
Jens Axboe1e6fa522020-10-15 08:46:24 -06001373 return;
1374 if (refcount_dec_and_test(&req->work.identity->count))
1375 kfree(req->work.identity);
1376}
1377
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001378static void io_req_clean_work(struct io_kiocb *req)
Jens Axboecccf0ee2020-01-27 16:34:48 -07001379{
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001380 if (!(req->flags & REQ_F_WORK_INITIALIZED))
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001381 return;
Jens Axboe51a4cc12020-08-10 10:55:56 -06001382
Pavel Begunkove86d0042021-02-01 18:59:54 +00001383 if (req->work.flags & IO_WQ_WORK_MM)
Jens Axboe98447d62020-10-14 10:48:51 -06001384 mmdrop(req->work.identity->mm);
Dennis Zhou91d8f512020-09-16 13:41:05 -07001385#ifdef CONFIG_BLK_CGROUP
Pavel Begunkove86d0042021-02-01 18:59:54 +00001386 if (req->work.flags & IO_WQ_WORK_BLKCG)
Jens Axboe98447d62020-10-14 10:48:51 -06001387 css_put(req->work.identity->blkcg_css);
Jens Axboedfead8a2020-10-14 10:12:37 -06001388#endif
Pavel Begunkove86d0042021-02-01 18:59:54 +00001389 if (req->work.flags & IO_WQ_WORK_CREDS)
Jens Axboe98447d62020-10-14 10:48:51 -06001390 put_cred(req->work.identity->creds);
Jens Axboedfead8a2020-10-14 10:12:37 -06001391 if (req->work.flags & IO_WQ_WORK_FS) {
Jens Axboe98447d62020-10-14 10:48:51 -06001392 struct fs_struct *fs = req->work.identity->fs;
Jens Axboeff002b32020-02-07 16:05:21 -07001393
Jens Axboe98447d62020-10-14 10:48:51 -06001394 spin_lock(&req->work.identity->fs->lock);
Jens Axboeff002b32020-02-07 16:05:21 -07001395 if (--fs->users)
1396 fs = NULL;
Jens Axboe98447d62020-10-14 10:48:51 -06001397 spin_unlock(&req->work.identity->fs->lock);
Jens Axboeff002b32020-02-07 16:05:21 -07001398 if (fs)
1399 free_fs_struct(fs);
1400 }
Pavel Begunkov34e08fe2021-02-01 18:59:53 +00001401 if (req->work.flags & IO_WQ_WORK_FILES) {
1402 put_files_struct(req->work.identity->files);
1403 put_nsproxy(req->work.identity->nsproxy);
Pavel Begunkov34e08fe2021-02-01 18:59:53 +00001404 }
1405 if (req->flags & REQ_F_INFLIGHT) {
1406 struct io_ring_ctx *ctx = req->ctx;
1407 struct io_uring_task *tctx = req->task->io_uring;
1408 unsigned long flags;
1409
1410 spin_lock_irqsave(&ctx->inflight_lock, flags);
1411 list_del(&req->inflight_entry);
1412 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1413 req->flags &= ~REQ_F_INFLIGHT;
1414 if (atomic_read(&tctx->in_idle))
1415 wake_up(&tctx->wait);
1416 }
Jens Axboe51a4cc12020-08-10 10:55:56 -06001417
Pavel Begunkove86d0042021-02-01 18:59:54 +00001418 req->flags &= ~REQ_F_WORK_INITIALIZED;
1419 req->work.flags &= ~(IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | IO_WQ_WORK_FS |
1420 IO_WQ_WORK_CREDS | IO_WQ_WORK_FILES);
Jens Axboe5c3462c2020-10-15 09:02:33 -06001421 io_put_identity(req->task->io_uring, req);
Jens Axboe1e6fa522020-10-15 08:46:24 -06001422}
1423
1424/*
1425 * Create a private copy of io_identity, since some fields don't match
1426 * the current context.
1427 */
1428static bool io_identity_cow(struct io_kiocb *req)
1429{
Jens Axboe5c3462c2020-10-15 09:02:33 -06001430 struct io_uring_task *tctx = current->io_uring;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001431 const struct cred *creds = NULL;
1432 struct io_identity *id;
1433
1434 if (req->work.flags & IO_WQ_WORK_CREDS)
1435 creds = req->work.identity->creds;
1436
1437 id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
1438 if (unlikely(!id)) {
1439 req->work.flags |= IO_WQ_WORK_CANCEL;
1440 return false;
1441 }
1442
1443 /*
1444 * We can safely just re-init the creds we copied. Either the field
1445 * matches the current one, or we haven't grabbed it yet. The only
1446 * exception is ->creds, through registered personalities, so handle
1447 * that one separately.
1448 */
1449 io_init_identity(id);
1450 if (creds)
Pavel Begunkove8c954d2020-12-06 22:22:46 +00001451 id->creds = creds;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001452
1453 /* add one for this request */
1454 refcount_inc(&id->count);
1455
Jens Axboecb8a8ae2020-11-03 12:19:07 -07001456 /* drop tctx and req identity references, if needed */
1457 if (tctx->identity != &tctx->__identity &&
1458 refcount_dec_and_test(&tctx->identity->count))
1459 kfree(tctx->identity);
1460 if (req->work.identity != &tctx->__identity &&
1461 refcount_dec_and_test(&req->work.identity->count))
Jens Axboe1e6fa522020-10-15 08:46:24 -06001462 kfree(req->work.identity);
1463
1464 req->work.identity = id;
Jens Axboe500a3732020-10-15 17:38:03 -06001465 tctx->identity = id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001466 return true;
1467}
1468
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001469static void io_req_track_inflight(struct io_kiocb *req)
1470{
1471 struct io_ring_ctx *ctx = req->ctx;
1472
1473 if (!(req->flags & REQ_F_INFLIGHT)) {
1474 io_req_init_async(req);
1475 req->flags |= REQ_F_INFLIGHT;
1476
1477 spin_lock_irq(&ctx->inflight_lock);
1478 list_add(&req->inflight_entry, &ctx->inflight_list);
1479 spin_unlock_irq(&ctx->inflight_lock);
1480 }
1481}
1482
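/*
 * Take references on each piece of task identity the opcode needs for async
 * execution. Returns false if any needed field no longer matches current, in
 * which case the caller COWs the identity and retries (see io_prep_async_work).
 */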
Jens Axboe1e6fa522020-10-15 08:46:24 -06001483static bool io_grab_identity(struct io_kiocb *req)
1484{
1485 const struct io_op_def *def = &io_op_defs[req->opcode];
Jens Axboe5c3462c2020-10-15 09:02:33 -06001486 struct io_identity *id = req->work.identity;
Jens Axboe1e6fa522020-10-15 08:46:24 -06001487
Jens Axboe69228332020-10-20 14:28:41 -06001488 if (def->work_flags & IO_WQ_WORK_FSIZE) {
1489 if (id->fsize != rlimit(RLIMIT_FSIZE))
1490 return false;
1491 req->work.flags |= IO_WQ_WORK_FSIZE;
1492 }
Jens Axboe1e6fa522020-10-15 08:46:24 -06001493#ifdef CONFIG_BLK_CGROUP
1494 if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
1495 (def->work_flags & IO_WQ_WORK_BLKCG)) {
1496 rcu_read_lock();
1497 if (id->blkcg_css != blkcg_css()) {
1498 rcu_read_unlock();
1499 return false;
1500 }
1501 /*
1502 * This should be rare; either the cgroup is dying or the task
1503 * is moving cgroups. Just punt to root for the handful of ios.
1504 */
1505 if (css_tryget_online(id->blkcg_css))
1506 req->work.flags |= IO_WQ_WORK_BLKCG;
1507 rcu_read_unlock();
1508 }
1509#endif
1510 if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
1511 if (id->creds != current_cred())
1512 return false;
1513 get_cred(id->creds);
1514 req->work.flags |= IO_WQ_WORK_CREDS;
1515 }
Jens Axboe4ea33a92020-10-15 13:46:44 -06001516#ifdef CONFIG_AUDIT
1517 if (!uid_eq(current->loginuid, id->loginuid) ||
1518 current->sessionid != id->sessionid)
1519 return false;
1520#endif
Jens Axboe1e6fa522020-10-15 08:46:24 -06001521 if (!(req->work.flags & IO_WQ_WORK_FS) &&
1522 (def->work_flags & IO_WQ_WORK_FS)) {
1523 if (current->fs != id->fs)
1524 return false;
1525 spin_lock(&id->fs->lock);
1526 if (!id->fs->in_exec) {
1527 id->fs->users++;
1528 req->work.flags |= IO_WQ_WORK_FS;
1529 } else {
1530 req->work.flags |= IO_WQ_WORK_CANCEL;
1531 }
1532 spin_unlock(&current->fs->lock);
1533 }
Pavel Begunkovaf604702020-11-25 18:41:28 +00001534 if (!(req->work.flags & IO_WQ_WORK_FILES) &&
1535 (def->work_flags & IO_WQ_WORK_FILES) &&
1536 !(req->flags & REQ_F_NO_FILE_TABLE)) {
1537 if (id->files != current->files ||
1538 id->nsproxy != current->nsproxy)
1539 return false;
1540 atomic_inc(&id->files->count);
1541 get_nsproxy(id->nsproxy);
Pavel Begunkovaf604702020-11-25 18:41:28 +00001542 req->work.flags |= IO_WQ_WORK_FILES;
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001543 io_req_track_inflight(req);
Pavel Begunkovaf604702020-11-25 18:41:28 +00001544 }
Jens Axboe77788772020-12-29 10:50:46 -07001545 if (!(req->work.flags & IO_WQ_WORK_MM) &&
1546 (def->work_flags & IO_WQ_WORK_MM)) {
1547 if (id->mm != current->mm)
1548 return false;
1549 mmgrab(id->mm);
1550 req->work.flags |= IO_WQ_WORK_MM;
1551 }
Jens Axboe1e6fa522020-10-15 08:46:24 -06001552
1553 return true;
Jens Axboe561fb042019-10-24 07:25:42 -06001554}
1555
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001556static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001557{
Jens Axboed3656342019-12-18 09:50:26 -07001558 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001559 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001560
Pavel Begunkov16d59802020-07-12 16:16:47 +03001561 io_req_init_async(req);
1562
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001563 if (req->flags & REQ_F_FORCE_ASYNC)
1564 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1565
Jens Axboed3656342019-12-18 09:50:26 -07001566 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001567 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001568 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboed3656342019-12-18 09:50:26 -07001569 } else {
1570 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001571 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001572 }
Pavel Begunkov23329512020-10-10 18:34:06 +01001573
Jens Axboe1e6fa522020-10-15 08:46:24 -06001574 /* if we fail grabbing identity, we must COW, regrab, and retry */
1575 if (io_grab_identity(req))
1576 return;
1577
1578 if (!io_identity_cow(req))
1579 return;
1580
1581 /* can't fail at this point */
1582 if (!io_grab_identity(req))
1583 WARN_ON(1);
Jens Axboe561fb042019-10-24 07:25:42 -06001584}
1585
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001586static void io_prep_async_link(struct io_kiocb *req)
1587{
1588 struct io_kiocb *cur;
1589
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001590 io_for_each_link(cur, req)
1591 io_prep_async_work(cur);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001592}
1593
Jens Axboe7271ef32020-08-10 09:55:22 -06001594static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001595{
Jackie Liua197f662019-11-08 08:09:12 -07001596 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001597 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe561fb042019-10-24 07:25:42 -06001598
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001599 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1600 &req->work, req->flags);
1601 io_wq_enqueue(ctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001602 return link;
Jens Axboe18d9be12019-09-10 09:13:05 -06001603}
1604
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001605static void io_queue_async_work(struct io_kiocb *req)
1606{
Jens Axboe7271ef32020-08-10 09:55:22 -06001607 struct io_kiocb *link;
1608
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001609 /* init ->work of the whole link before punting */
1610 io_prep_async_link(req);
Jens Axboe7271ef32020-08-10 09:55:22 -06001611 link = __io_queue_async_work(req);
1612
1613 if (link)
1614 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001615}
1616
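/*
 * Cancel a pending timeout. hrtimer_try_to_cancel() only returns -1 when the
 * timer callback is already running; in all other cases the timeout request
 * is completed here with a result of 0.
 */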
Jens Axboe5262f562019-09-17 12:26:57 -06001617static void io_kill_timeout(struct io_kiocb *req)
1618{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001619 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001620 int ret;
1621
Jens Axboee8c2bc12020-08-15 18:44:09 -07001622 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe5262f562019-09-17 12:26:57 -06001623 if (ret != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001624 atomic_set(&req->ctx->cq_timeouts,
1625 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001626 list_del_init(&req->timeout.list);
Jens Axboe78e19bb2019-11-06 15:21:34 -07001627 io_cqring_fill_event(req, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001628 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001629 }
1630}
1631
Jens Axboe76e1b642020-09-26 15:05:03 -06001632/*
1633 * Returns true if we found and killed one or more timeouts
1634 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00001635static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
1636 struct files_struct *files)
Jens Axboe5262f562019-09-17 12:26:57 -06001637{
1638 struct io_kiocb *req, *tmp;
Jens Axboe76e1b642020-09-26 15:05:03 -06001639 int canceled = 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001640
1641 spin_lock_irq(&ctx->completion_lock);
Jens Axboef3606e32020-09-22 08:18:24 -06001642 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00001643 if (io_match_task(req, tsk, files)) {
Jens Axboef3606e32020-09-22 08:18:24 -06001644 io_kill_timeout(req);
Jens Axboe76e1b642020-09-26 15:05:03 -06001645 canceled++;
1646 }
Jens Axboef3606e32020-09-22 08:18:24 -06001647 }
Jens Axboe5262f562019-09-17 12:26:57 -06001648 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe76e1b642020-09-26 15:05:03 -06001649 return canceled != 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001650}
1651
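/*
 * Requeue drained requests whose dependencies have now completed: pop them
 * off the defer list in order and hand them to task_work for execution.
 */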
Pavel Begunkov04518942020-05-26 20:34:05 +03001652static void __io_queue_deferred(struct io_ring_ctx *ctx)
1653{
1654 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001655 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1656 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001657
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001658 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001659 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001660 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001661 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001662 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001663 } while (!list_empty(&ctx->defer_list));
1664}
1665
Pavel Begunkov360428f2020-05-30 14:54:17 +03001666static void io_flush_timeouts(struct io_ring_ctx *ctx)
1667{
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001668 u32 seq;
1669
1670 if (list_empty(&ctx->timeout_list))
1671 return;
1672
1673 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1674
1675 do {
1676 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001677 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001678 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001679
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001680 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001681 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001682
1683 /*
1684 * Since seq can easily wrap around over time, subtract
1685 * the last seq at which timeouts were flushed before comparing.
1686 * Assuming not more than 2^31-1 events have happened since,
1687 * these subtractions won't have wrapped, so we can check if
1688 * target is in [last_seq, current_seq] by comparing the two.
1689 */
1690 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1691 events_got = seq - ctx->cq_last_tm_flush;
1692 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001693 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001694
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001695 list_del_init(&req->timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001696 io_kill_timeout(req);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001697 } while (!list_empty(&ctx->timeout_list));
1698
1699 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001700}
1701
Jens Axboede0617e2019-04-06 21:51:27 -06001702static void io_commit_cqring(struct io_ring_ctx *ctx)
1703{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001704 io_flush_timeouts(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001705
1706 /* order cqe stores with ring update */
1707 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001708
Pavel Begunkov04518942020-05-26 20:34:05 +03001709 if (unlikely(!list_empty(&ctx->defer_list)))
1710 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001711}
1712
Jens Axboe90554202020-09-03 12:12:41 -06001713static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1714{
1715 struct io_rings *r = ctx->rings;
1716
1717 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1718}
1719
Pavel Begunkov888aae22021-01-19 13:32:39 +00001720static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1721{
1722 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1723}
1724
Jens Axboe2b188cc2019-01-07 10:46:33 -07001725static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1726{
Hristo Venev75b28af2019-08-26 17:23:46 +00001727 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001728 unsigned tail;
1729
Stefan Bühler115e12e2019-04-24 23:54:18 +02001730 /*
1731 * writes to the cq entry need to come after reading head; the
1732 * control dependency is enough as we're using WRITE_ONCE to
1733 * fill the cq entry
1734 */
Pavel Begunkov888aae22021-01-19 13:32:39 +00001735 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001736 return NULL;
1737
Pavel Begunkov888aae22021-01-19 13:32:39 +00001738 tail = ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001739 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001740}
1741
Jens Axboef2842ab2020-01-08 11:04:00 -07001742static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1743{
Jens Axboef0b493e2020-02-01 21:30:11 -07001744 if (!ctx->cq_ev_fd)
1745 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001746 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1747 return false;
Jens Axboef2842ab2020-01-08 11:04:00 -07001748 if (!ctx->eventfd_async)
1749 return true;
Jens Axboeb41e9852020-02-17 09:52:41 -07001750 return io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001751}
1752
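/*
 * Wake up everybody that may be waiting on completions: the ring's main
 * waitqueue, the SQPOLL thread, a registered eventfd, and poll/fasync
 * watchers on the ring fd.
 */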
Jens Axboeb41e9852020-02-17 09:52:41 -07001753static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001754{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001755 /* see waitqueue_active() comment */
1756 smp_mb();
1757
Jens Axboe8c838782019-03-12 15:48:16 -06001758 if (waitqueue_active(&ctx->wait))
1759 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001760 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1761 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001762 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001763 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001764 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001765 wake_up_interruptible(&ctx->cq_wait);
1766 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1767 }
Jens Axboe8c838782019-03-12 15:48:16 -06001768}
1769
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001770static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1771{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001772 /* see waitqueue_active() comment */
1773 smp_mb();
1774
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001775 if (ctx->flags & IORING_SETUP_SQPOLL) {
1776 if (waitqueue_active(&ctx->wait))
1777 wake_up(&ctx->wait);
1778 }
1779 if (io_should_trigger_evfd(ctx))
1780 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001781 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001782 wake_up_interruptible(&ctx->cq_wait);
1783 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1784 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001785}
1786
Jens Axboec4a2ed72019-11-21 21:01:26 -07001787/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c503152021-01-04 20:36:36 +00001788static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1789 struct task_struct *tsk,
1790 struct files_struct *files)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001791{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001792 struct io_rings *rings = ctx->rings;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001793 struct io_kiocb *req, *tmp;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001794 struct io_uring_cqe *cqe;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001795 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001796 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001797 LIST_HEAD(list);
1798
Pavel Begunkove23de152020-12-17 00:24:37 +00001799 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1800 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001801
Jens Axboeb18032b2021-01-24 16:58:56 -07001802 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001803 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboee6c8aa92020-09-28 13:10:13 -06001804 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00001805 if (!io_match_task(req, tsk, files))
Jens Axboee6c8aa92020-09-28 13:10:13 -06001806 continue;
1807
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001808 cqe = io_get_cqring(ctx);
1809 if (!cqe && !force)
1810 break;
1811
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001812 list_move(&req->compl.list, &list);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001813 if (cqe) {
1814 WRITE_ONCE(cqe->user_data, req->user_data);
1815 WRITE_ONCE(cqe->res, req->result);
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001816 WRITE_ONCE(cqe->flags, req->compl.cflags);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001817 } else {
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001818 ctx->cached_cq_overflow++;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001819 WRITE_ONCE(ctx->rings->cq_overflow,
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001820 ctx->cached_cq_overflow);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001821 }
Jens Axboeb18032b2021-01-24 16:58:56 -07001822 posted = true;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001823 }
1824
Pavel Begunkov09e88402020-12-17 00:24:38 +00001825 all_flushed = list_empty(&ctx->cq_overflow_list);
1826 if (all_flushed) {
1827 clear_bit(0, &ctx->sq_check_overflow);
1828 clear_bit(0, &ctx->cq_check_overflow);
1829 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1830 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001831
Jens Axboeb18032b2021-01-24 16:58:56 -07001832 if (posted)
1833 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001834 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001835 if (posted)
1836 io_cqring_ev_posted(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001837
1838 while (!list_empty(&list)) {
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001839 req = list_first_entry(&list, struct io_kiocb, compl.list);
1840 list_del(&req->compl.list);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001841 io_put_req(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001842 }
Jens Axboec4a2ed72019-11-21 21:01:26 -07001843
Pavel Begunkov09e88402020-12-17 00:24:38 +00001844 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001845}
1846
Pavel Begunkov6c503152021-01-04 20:36:36 +00001847static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1848 struct task_struct *tsk,
1849 struct files_struct *files)
1850{
1851 if (test_bit(0, &ctx->cq_check_overflow)) {
1852 /* iopoll syncs against uring_lock, not completion_lock */
1853 if (ctx->flags & IORING_SETUP_IOPOLL)
1854 mutex_lock(&ctx->uring_lock);
1855 __io_cqring_overflow_flush(ctx, force, tsk, files);
1856 if (ctx->flags & IORING_SETUP_IOPOLL)
1857 mutex_unlock(&ctx->uring_lock);
1858 }
1859}
1860
Jens Axboebcda7ba2020-02-23 16:42:51 -07001861static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001862{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001863 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001864 struct io_uring_cqe *cqe;
1865
Jens Axboe78e19bb2019-11-06 15:21:34 -07001866 trace_io_uring_complete(ctx, req->user_data, res);
Jens Axboe51c3ff62019-11-03 06:52:50 -07001867
Jens Axboe2b188cc2019-01-07 10:46:33 -07001868 /*
1869 * If we can't get a cq entry, userspace overflowed the
1870 * submission (by quite a lot). Increment the overflow count in
1871 * the ring.
1872 */
1873 cqe = io_get_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001874 if (likely(cqe)) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001875 WRITE_ONCE(cqe->user_data, req->user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001876 WRITE_ONCE(cqe->res, res);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001877 WRITE_ONCE(cqe->flags, cflags);
Jens Axboefdaf0832020-10-30 09:37:30 -06001878 } else if (ctx->cq_overflow_flushed ||
1879 atomic_read(&req->task->io_uring->in_idle)) {
Jens Axboe0f212202020-09-13 13:09:39 -06001880 /*
1881 * If we're in ring overflow flush mode, or in task cancel mode,
1882 * then we cannot store the request for later flushing; we need
1883 * to drop it on the floor.
1884 */
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001885 ctx->cached_cq_overflow++;
1886 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001887 } else {
Jens Axboead3eb2c2019-12-18 17:12:20 -07001888 if (list_empty(&ctx->cq_overflow_list)) {
1889 set_bit(0, &ctx->sq_check_overflow);
1890 set_bit(0, &ctx->cq_check_overflow);
Xiaoguang Wang6d5f9042020-07-09 09:15:29 +08001891 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
Jens Axboead3eb2c2019-12-18 17:12:20 -07001892 }
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001893 io_clean_op(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001894 req->result = res;
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001895 req->compl.cflags = cflags;
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001896 refcount_inc(&req->refs);
1897 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001898 }
1899}
1900
Jens Axboebcda7ba2020-02-23 16:42:51 -07001901static void io_cqring_fill_event(struct io_kiocb *req, long res)
1902{
1903 __io_cqring_fill_event(req, res, 0);
1904}
1905
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00001906static void io_req_complete_post(struct io_kiocb *req, long res,
1907 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001908{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001909 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001910 unsigned long flags;
1911
1912 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001913 __io_cqring_fill_event(req, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001914 io_commit_cqring(ctx);
1915 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1916
Jens Axboe8c838782019-03-12 15:48:16 -06001917 io_cqring_ev_posted(ctx);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00001918}
1919
1920static inline void io_req_complete_nostate(struct io_kiocb *req, long res,
1921 unsigned int cflags)
1922{
1923 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001924 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001925}
1926
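/*
 * Post CQEs for all requests completed inline during submission in one
 * completion_lock section, then drop both the submit and complete reference
 * on each of them.
 */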
Pavel Begunkovba88ff12021-02-10 00:03:11 +00001927static void io_submit_flush_completions(struct io_comp_state *cs,
1928 struct io_ring_ctx *ctx)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001929{
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00001930 int i, nr = cs->nr;
1931
Jens Axboe229a7b62020-06-22 10:13:11 -06001932 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00001933 for (i = 0; i < nr; i++) {
1934 struct io_kiocb *req = cs->reqs[i];
Jens Axboe229a7b62020-06-22 10:13:11 -06001935
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001936 __io_cqring_fill_event(req, req->result, req->compl.cflags);
Jens Axboe229a7b62020-06-22 10:13:11 -06001937 }
1938 io_commit_cqring(ctx);
1939 spin_unlock_irq(&ctx->completion_lock);
1940
1941 io_cqring_ev_posted(ctx);
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00001942 for (i = 0; i < nr; i++)
1943 io_double_put_req(cs->reqs[i]);
Jens Axboe229a7b62020-06-22 10:13:11 -06001944 cs->nr = 0;
1945}
1946
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001947static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001948 unsigned int cflags)
Jens Axboe229a7b62020-06-22 10:13:11 -06001949{
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001950 io_clean_op(req);
1951 req->result = res;
1952 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001953 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001954}
1955
Pavel Begunkov889fca72021-02-10 00:03:09 +00001956static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1957 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001958{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001959 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1960 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001961 else
Pavel Begunkov889fca72021-02-10 00:03:09 +00001962 io_req_complete_nostate(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001963}
1964
1965static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001966{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001967 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001968}
1969
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001970static inline bool io_is_fallback_req(struct io_kiocb *req)
1971{
1972 return req == (struct io_kiocb *)
1973 ((unsigned long) req->ctx->fallback_req & ~1UL);
1974}
1975
1976static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1977{
1978 struct io_kiocb *req;
1979
1980 req = ctx->fallback_req;
Bijan Mottahedehdd461af2020-04-29 17:47:50 -07001981 if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
Jens Axboe0ddf92e2019-11-08 08:52:53 -07001982 return req;
1983
1984 return NULL;
1985}
1986
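/*
 * Allocate a request from the submit-state cache, refilling the cache with a
 * bulk slab allocation when it runs dry and falling back to the ring's
 * preallocated fallback_req if even a single allocation fails.
 */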
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001987static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001988{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001989 struct io_submit_state *state = &ctx->submit_state;
1990
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001991 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001992 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001993 size_t sz;
1994 int ret;
1995
1996 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
Jens Axboefd6fab22019-03-14 16:30:06 -06001997 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1998
1999 /*
2000 * Bulk alloc is all-or-nothing. If we fail to get a batch,
2001 * retry single alloc to be on the safe side.
2002 */
2003 if (unlikely(ret <= 0)) {
2004 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
2005 if (!state->reqs[0])
Pavel Begunkov85bcb6c2021-01-19 13:32:40 +00002006 return io_get_fallback_req(ctx);
Jens Axboefd6fab22019-03-14 16:30:06 -06002007 ret = 1;
2008 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03002009 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002010 }
2011
Pavel Begunkov291b2822020-09-30 22:57:01 +03002012 state->free_reqs--;
2013 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07002014}
2015
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002016static inline void io_put_file(struct io_kiocb *req, struct file *file,
2017 bool fixed)
2018{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00002019 if (!fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002020 fput(file);
2021}
2022
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002023static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002024{
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03002025 io_clean_op(req);
Pavel Begunkov929a3af2020-02-19 00:19:09 +03002026
Jens Axboee8c2bc12020-08-15 18:44:09 -07002027 if (req->async_data)
2028 kfree(req->async_data);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002029 if (req->file)
2030 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00002031 if (req->fixed_rsrc_refs)
2032 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002033 io_req_clean_work(req);
Pavel Begunkove6543a82020-06-28 12:52:30 +03002034}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03002035
Pavel Begunkov7c660732021-01-25 11:42:21 +00002036static inline void io_put_task(struct task_struct *task, int nr)
2037{
2038 struct io_uring_task *tctx = task->io_uring;
2039
2040 percpu_counter_sub(&tctx->inflight, nr);
2041 if (unlikely(atomic_read(&tctx->in_idle)))
2042 wake_up(&tctx->wait);
2043 put_task_struct_many(task, nr);
2044}
2045
Pavel Begunkov216578e2020-10-13 09:44:00 +01002046static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03002047{
Jens Axboe51a4cc12020-08-10 10:55:56 -06002048 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002049
Pavel Begunkov216578e2020-10-13 09:44:00 +01002050 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00002051 io_put_task(req->task, 1);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002052
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03002053 if (likely(!io_is_fallback_req(req)))
2054 kmem_cache_free(req_cachep, req);
2055 else
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002056 clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
2057 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06002058}
2059
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002060static inline void io_remove_next_linked(struct io_kiocb *req)
2061{
2062 struct io_kiocb *nxt = req->link;
2063
2064 req->link = nxt->link;
2065 nxt->link = NULL;
2066}
2067
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002068static void io_kill_linked_timeout(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002069{
Jackie Liua197f662019-11-08 08:09:12 -07002070 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002071 struct io_kiocb *link;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002072 bool cancelled = false;
2073 unsigned long flags;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002074
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002075 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002076 link = req->link;
2077
Pavel Begunkov900fad42020-10-19 16:39:16 +01002078 /*
2079 * Can happen if a linked timeout fired and link had been like
2080 * req -> link t-out -> link t-out [-> ...]
2081 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002082 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
2083 struct io_timeout_data *io = link->async_data;
2084 int ret;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002085
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002086 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00002087 link->timeout.head = NULL;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002088 ret = hrtimer_try_to_cancel(&io->timer);
2089 if (ret != -1) {
2090 io_cqring_fill_event(link, -ECANCELED);
2091 io_commit_cqring(ctx);
2092 cancelled = true;
2093 }
2094 }
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002095 req->flags &= ~REQ_F_LINK_TIMEOUT;
Pavel Begunkov216578e2020-10-13 09:44:00 +01002096 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeab0b6452020-06-30 08:43:15 -06002097
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002098 if (cancelled) {
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002099 io_cqring_ev_posted(ctx);
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002100 io_put_req(link);
2101 }
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002102}
2103
Jens Axboe4d7dd462019-11-20 13:03:52 -07002104
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002105static void io_fail_links(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002106{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002107 struct io_kiocb *link, *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07002108 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002109 unsigned long flags;
Jens Axboe9e645e112019-05-10 16:07:28 -06002110
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002111 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002112 link = req->link;
2113 req->link = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002114
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002115 while (link) {
2116 nxt = link->link;
2117 link->link = NULL;
2118
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002119 trace_io_uring_fail_link(req, link);
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002120 io_cqring_fill_event(link, -ECANCELED);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002121
2122 /*
2123 * It's ok to free under spinlock as they're not linked anymore,
2124 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
2125 * work.fs->lock.
2126 */
2127 if (link->flags & REQ_F_WORK_INITIALIZED)
2128 io_put_req_deferred(link, 2);
2129 else
2130 io_double_put_req(link);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002131 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06002132 }
Jens Axboe2665abf2019-11-05 12:40:47 -07002133 io_commit_cqring(ctx);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002134 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002135
Jens Axboe2665abf2019-11-05 12:40:47 -07002136 io_cqring_ev_posted(ctx);
Jens Axboe9e645e112019-05-10 16:07:28 -06002137}
2138
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002139static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002140{
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002141 if (req->flags & REQ_F_LINK_TIMEOUT)
2142 io_kill_linked_timeout(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07002143
Jens Axboe9e645e112019-05-10 16:07:28 -06002144 /*
2145 * If LINK is set, we have dependent requests in this chain. If we
2146 * didn't fail this request, queue the first one up, moving any other
2147 * dependencies to the next request. In case of failure, fail the rest
2148 * of the chain.
2149 */
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002150 if (likely(!(req->flags & REQ_F_FAIL_LINK))) {
2151 struct io_kiocb *nxt = req->link;
2152
2153 req->link = NULL;
2154 return nxt;
2155 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002156 io_fail_links(req);
2157 return NULL;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002158}
Jens Axboe2665abf2019-11-05 12:40:47 -07002159
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002160static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002161{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002162 if (likely(!(req->link) && !(req->flags & REQ_F_LINK_TIMEOUT)))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002163 return NULL;
2164 return __io_req_find_next(req);
2165}
2166
Jens Axboe355fb9e2020-10-22 20:19:35 -06002167static int io_req_task_work_add(struct io_kiocb *req)
Jens Axboec2c4c832020-07-01 15:37:11 -06002168{
2169 struct task_struct *tsk = req->task;
2170 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe91989c72020-10-16 09:02:26 -06002171 enum task_work_notify_mode notify;
2172 int ret;
Jens Axboec2c4c832020-07-01 15:37:11 -06002173
Jens Axboe6200b0a2020-09-13 14:38:30 -06002174 if (tsk->flags & PF_EXITING)
2175 return -ESRCH;
2176
Jens Axboec2c4c832020-07-01 15:37:11 -06002177 /*
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06002178 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2179 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2180 * processing task_work. There's no reliable way to tell if TWA_RESUME
2181 * will do the job.
Jens Axboec2c4c832020-07-01 15:37:11 -06002182 */
Jens Axboe91989c72020-10-16 09:02:26 -06002183 notify = TWA_NONE;
Jens Axboe355fb9e2020-10-22 20:19:35 -06002184 if (!(ctx->flags & IORING_SETUP_SQPOLL))
Jens Axboec2c4c832020-07-01 15:37:11 -06002185 notify = TWA_SIGNAL;
2186
Jens Axboe87c43112020-09-30 21:00:14 -06002187 ret = task_work_add(tsk, &req->task_work, notify);
Jens Axboec2c4c832020-07-01 15:37:11 -06002188 if (!ret)
2189 wake_up_process(tsk);
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06002190
Jens Axboec2c4c832020-07-01 15:37:11 -06002191 return ret;
2192}
2193
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002194static void io_req_task_work_add_fallback(struct io_kiocb *req,
2195 void (*cb)(struct callback_head *))
2196{
2197 struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
2198
2199 init_task_work(&req->task_work, cb);
2200 task_work_add(tsk, &req->task_work, TWA_NONE);
2201 wake_up_process(tsk);
2202}
2203
Jens Axboec40f6372020-06-25 15:39:59 -06002204static void __io_req_task_cancel(struct io_kiocb *req, int error)
2205{
2206 struct io_ring_ctx *ctx = req->ctx;
2207
2208 spin_lock_irq(&ctx->completion_lock);
2209 io_cqring_fill_event(req, error);
2210 io_commit_cqring(ctx);
2211 spin_unlock_irq(&ctx->completion_lock);
2212
2213 io_cqring_ev_posted(ctx);
2214 req_set_fail_links(req);
2215 io_double_put_req(req);
2216}
2217
2218static void io_req_task_cancel(struct callback_head *cb)
2219{
2220 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002221 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002222
2223 __io_req_task_cancel(req, -ECANCELED);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002224 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002225}
2226
2227static void __io_req_task_submit(struct io_kiocb *req)
2228{
2229 struct io_ring_ctx *ctx = req->ctx;
2230
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002231 mutex_lock(&ctx->uring_lock);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00002232 if (!ctx->sqo_dead &&
2233 !__io_sq_thread_acquire_mm(ctx) &&
2234 !__io_sq_thread_acquire_files(ctx))
Pavel Begunkovc1379e22020-09-30 22:57:56 +03002235 __io_queue_sqe(req, NULL);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002236 else
Jens Axboec40f6372020-06-25 15:39:59 -06002237 __io_req_task_cancel(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002238 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002239}
2240
Jens Axboec40f6372020-06-25 15:39:59 -06002241static void io_req_task_submit(struct callback_head *cb)
2242{
2243 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06002244 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002245
2246 __io_req_task_submit(req);
Jens Axboe6d816e02020-08-11 08:04:14 -06002247 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002248}
2249
2250static void io_req_task_queue(struct io_kiocb *req)
2251{
Jens Axboec40f6372020-06-25 15:39:59 -06002252 int ret;
2253
2254 init_task_work(&req->task_work, io_req_task_submit);
Jens Axboe6d816e02020-08-11 08:04:14 -06002255 percpu_ref_get(&req->ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06002256
Jens Axboe355fb9e2020-10-22 20:19:35 -06002257 ret = io_req_task_work_add(req);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002258 if (unlikely(ret))
2259 io_req_task_work_add_fallback(req, io_req_task_cancel);
Jens Axboec40f6372020-06-25 15:39:59 -06002260}
2261
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002262static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002263{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002264 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002265
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002266 if (nxt)
2267 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002268}
2269
Jens Axboe9e645e112019-05-10 16:07:28 -06002270static void io_free_req(struct io_kiocb *req)
2271{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002272 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002273 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002274}
2275
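/*
 * Batching state for freeing completed requests: requests and task references
 * are collected here so they can be released with a single
 * kmem_cache_free_bulk() / percpu_ref_put_many() / io_put_task() call each.
 */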
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002276struct req_batch {
2277 void *reqs[IO_IOPOLL_BATCH];
2278 int to_free;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002279
2280 struct task_struct *task;
2281 int task_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002282};
2283
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002284static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002285{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002286 rb->to_free = 0;
2287 rb->task_refs = 0;
2288 rb->task = NULL;
2289}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002290
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002291static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
2292 struct req_batch *rb)
2293{
2294 kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
2295 percpu_ref_put_many(&ctx->refs, rb->to_free);
2296 rb->to_free = 0;
2297}
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002298
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002299static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2300 struct req_batch *rb)
2301{
2302 if (rb->to_free)
2303 __io_req_free_batch_flush(ctx, rb);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002304 if (rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002305 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002306 rb->task = NULL;
2307 }
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002308}
2309
2310static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
2311{
2312 if (unlikely(io_is_fallback_req(req))) {
2313 io_free_req(req);
2314 return;
2315 }
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002316 io_queue_next(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002317
Jens Axboee3bc8e92020-09-24 08:45:57 -06002318 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002319 if (rb->task)
2320 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002321 rb->task = req->task;
2322 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002323 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002324 rb->task_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002325
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002326 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002327 rb->reqs[rb->to_free++] = req;
2328 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
2329 __io_req_free_batch_flush(req->ctx, rb);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002330}
2331
Jens Axboeba816ad2019-09-28 11:36:45 -06002332/*
2333 * Drop reference to request, return next in chain (if there is one) if this
2334 * was the last reference to this request.
2335 */
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002336static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002337{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002338 struct io_kiocb *nxt = NULL;
2339
Jens Axboe2a44f462020-02-25 13:25:41 -07002340 if (refcount_dec_and_test(&req->refs)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002341 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002342 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002343 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002344 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002345}
2346
Jens Axboe2b188cc2019-01-07 10:46:33 -07002347static void io_put_req(struct io_kiocb *req)
2348{
Jens Axboedef596e2019-01-09 08:59:42 -07002349 if (refcount_dec_and_test(&req->refs))
2350 io_free_req(req);
2351}
2352
Pavel Begunkov216578e2020-10-13 09:44:00 +01002353static void io_put_req_deferred_cb(struct callback_head *cb)
2354{
2355 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2356
2357 io_free_req(req);
2358}
2359
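/*
 * Deferred puts: rather than freeing inline, punt the final io_free_req() to
 * task_work so it runs in the owning task's context; callers may hold
 * ->completion_lock with interrupts disabled.
 */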
2360static void io_free_req_deferred(struct io_kiocb *req)
2361{
2362 int ret;
2363
2364 init_task_work(&req->task_work, io_put_req_deferred_cb);
Jens Axboe355fb9e2020-10-22 20:19:35 -06002365 ret = io_req_task_work_add(req);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002366 if (unlikely(ret))
2367 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002368}
2369
2370static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2371{
2372 if (refcount_sub_and_test(refs, &req->refs))
2373 io_free_req_deferred(req);
2374}
2375
Jens Axboe978db572019-11-14 22:39:04 -07002376static void io_double_put_req(struct io_kiocb *req)
2377{
2378 /* drop both submit and complete references */
2379 if (refcount_sub_and_test(2, &req->refs))
2380 io_free_req(req);
2381}
2382
Pavel Begunkov6c503152021-01-04 20:36:36 +00002383static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002384{
2385 /* See comment at the top of this file */
2386 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002387 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002388}
2389
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002390static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2391{
2392 struct io_rings *rings = ctx->rings;
2393
2394 /* make sure SQ entry isn't read before tail */
2395 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2396}
2397
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002398static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002399{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002400 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002401
Jens Axboebcda7ba2020-02-23 16:42:51 -07002402 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2403 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002404 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002405 kfree(kbuf);
2406 return cflags;
2407}
2408
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002409static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2410{
2411 struct io_buffer *kbuf;
2412
2413 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2414 return io_put_kbuf(req, kbuf);
2415}
2416
Jens Axboe4c6e2772020-07-01 11:29:10 -06002417static inline bool io_run_task_work(void)
2418{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002419 /*
2420 * Not safe to run on exiting task, and the task_work handling will
2421 * not add work to such a task.
2422 */
2423 if (unlikely(current->flags & PF_EXITING))
2424 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002425 if (current->task_works) {
2426 __set_current_state(TASK_RUNNING);
2427 task_work_run();
2428 return true;
2429 }
2430
2431 return false;
2432}
2433
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002434static void io_iopoll_queue(struct list_head *again)
2435{
2436 struct io_kiocb *req;
2437
2438 do {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002439 req = list_first_entry(again, struct io_kiocb, inflight_entry);
2440 list_del(&req->inflight_entry);
Pavel Begunkov889fca72021-02-10 00:03:09 +00002441 __io_complete_rw(req, -EAGAIN, 0, 0);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002442 } while (!list_empty(again));
2443}
2444
Jens Axboedef596e2019-01-09 08:59:42 -07002445/*
2446 * Find and free completed poll iocbs
2447 */
2448static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2449 struct list_head *done)
2450{
Jens Axboe8237e042019-12-28 10:48:22 -07002451 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002452 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002453 LIST_HEAD(again);
2454
2455 /* order with ->result store in io_complete_rw_iopoll() */
2456 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002457
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002458 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002459 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002460 int cflags = 0;
2461
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002462 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002463 if (READ_ONCE(req->result) == -EAGAIN) {
Jens Axboe56450c22020-08-26 18:58:26 -06002464 req->result = 0;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002465 req->iopoll_completed = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002466 list_move_tail(&req->inflight_entry, &again);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002467 continue;
2468 }
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002469 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002470
Jens Axboebcda7ba2020-02-23 16:42:51 -07002471 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002472 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002473
2474 __io_cqring_fill_event(req, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002475 (*nr_events)++;
2476
Pavel Begunkovc3524382020-06-28 12:52:32 +03002477 if (refcount_dec_and_test(&req->refs))
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002478 io_req_free_batch(&rb, req);
Jens Axboedef596e2019-01-09 08:59:42 -07002479 }
Jens Axboedef596e2019-01-09 08:59:42 -07002480
Jens Axboe09bb8392019-03-13 12:39:28 -06002481 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002482 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002483 io_req_free_batch_finish(ctx, &rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002484
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002485 if (!list_empty(&again))
2486 io_iopoll_queue(&again);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002487}
2488
Jens Axboedef596e2019-01-09 08:59:42 -07002489static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2490 long min)
2491{
2492 struct io_kiocb *req, *tmp;
2493 LIST_HEAD(done);
2494 bool spin;
2495 int ret;
2496
2497 /*
2498 * Only spin for completions if we don't have multiple devices hanging
2499 * off our complete list, and we're under the requested amount.
2500 */
2501 spin = !ctx->poll_multi_file && *nr_events < min;
2502
2503 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002504 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002505 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002506
2507 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002508 * Move completed and retryable entries to our local lists.
2509 * If we find a request that requires polling, break out
2510 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002511 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002512 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002513 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002514 continue;
2515 }
2516 if (!list_empty(&done))
2517 break;
2518
2519 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2520 if (ret < 0)
2521 break;
2522
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002523 /* iopoll may have completed current req */
2524 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002525 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002526
Jens Axboedef596e2019-01-09 08:59:42 -07002527 if (ret && spin)
2528 spin = false;
2529 ret = 0;
2530 }
2531
2532 if (!list_empty(&done))
2533 io_iopoll_complete(ctx, nr_events, &done);
2534
2535 return ret;
2536}
2537
2538/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08002539 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07002540 * non-spinning poll check - we'll still enter the driver poll loop, but only
2541 * as a non-spinning completion check.
2542 */
2543static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2544 long min)
2545{
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002546 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07002547 int ret;
2548
2549 ret = io_do_iopoll(ctx, nr_events, min);
2550 if (ret < 0)
2551 return ret;
Pavel Begunkoveba0a4d2020-07-06 17:59:30 +03002552 if (*nr_events >= min)
Jens Axboedef596e2019-01-09 08:59:42 -07002553 return 0;
2554 }
2555
2556 return 1;
2557}
2558
2559/*
2560 * We can't just wait for polled events to come to us, we have to actively
2561 * find and complete them.
2562 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002563static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002564{
2565 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2566 return;
2567
2568 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002569 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002570 unsigned int nr_events = 0;
2571
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002572 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002573
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002574		/* let it sleep and repeat later if we can't complete a request */
2575 if (nr_events == 0)
2576 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002577 /*
 2579		 * Ensure we allow local-to-the-cpu processing to take place;
 2580		 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002580		 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002581 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002582 if (need_resched()) {
2583 mutex_unlock(&ctx->uring_lock);
2584 cond_resched();
2585 mutex_lock(&ctx->uring_lock);
2586 }
Jens Axboedef596e2019-01-09 08:59:42 -07002587 }
2588 mutex_unlock(&ctx->uring_lock);
2589}
2590
Pavel Begunkov7668b922020-07-07 16:36:21 +03002591static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002592{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002593 unsigned int nr_events = 0;
Jens Axboe2b2ed972019-10-25 10:06:15 -06002594 int iters = 0, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002595
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002596 /*
2597 * We disallow the app entering submit/complete with polling, but we
2598 * still need to lock the ring to prevent racing with polled issue
2599 * that got punted to a workqueue.
2600 */
2601 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002602 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002603 /*
Jens Axboea3a0e432019-08-20 11:03:11 -06002604 * Don't enter poll loop if we already have events pending.
2605 * If we do, we can potentially be spinning for commands that
2606 * already triggered a CQE (eg in error).
2607 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00002608 if (test_bit(0, &ctx->cq_check_overflow))
2609 __io_cqring_overflow_flush(ctx, false, NULL, NULL);
2610 if (io_cqring_events(ctx))
Jens Axboea3a0e432019-08-20 11:03:11 -06002611 break;
2612
2613 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -06002614 * If a submit got punted to a workqueue, we can have the
2615 * application entering polling for a command before it gets
2616 * issued. That app will hold the uring_lock for the duration
2617 * of the poll right here, so we need to take a breather every
2618 * now and then to ensure that the issue has a chance to add
2619 * the poll to the issued list. Otherwise we can spin here
2620 * forever, while the workqueue is stuck trying to acquire the
2621 * very same mutex.
2622 */
2623 if (!(++iters & 7)) {
2624 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002625 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002626 mutex_lock(&ctx->uring_lock);
2627 }
2628
Pavel Begunkov7668b922020-07-07 16:36:21 +03002629 ret = io_iopoll_getevents(ctx, &nr_events, min);
Jens Axboedef596e2019-01-09 08:59:42 -07002630 if (ret <= 0)
2631 break;
2632 ret = 0;
Pavel Begunkov7668b922020-07-07 16:36:21 +03002633 } while (min && !nr_events && !need_resched());
Jens Axboedef596e2019-01-09 08:59:42 -07002634
Jens Axboe500f9fb2019-08-19 12:15:59 -06002635 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002636 return ret;
2637}
2638
Jens Axboe491381ce2019-10-17 09:20:46 -06002639static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002640{
Jens Axboe491381ce2019-10-17 09:20:46 -06002641 /*
2642 * Tell lockdep we inherited freeze protection from submission
2643 * thread.
2644 */
2645 if (req->flags & REQ_F_ISREG) {
2646 struct inode *inode = file_inode(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002647
Jens Axboe491381ce2019-10-17 09:20:46 -06002648 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002649 }
Jens Axboe491381ce2019-10-17 09:20:46 -06002650 file_end_write(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002651}
2652
Jens Axboea1d7c392020-06-22 11:09:46 -06002653static void io_complete_rw_common(struct kiocb *kiocb, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002654 unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002655{
Jens Axboe9adbd452019-12-20 08:45:55 -07002656 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002657 int cflags = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002658
Jens Axboe491381ce2019-10-17 09:20:46 -06002659 if (kiocb->ki_flags & IOCB_WRITE)
2660 kiocb_end_write(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002661
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002662 if (res != req->result)
2663 req_set_fail_links(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002664 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002665 cflags = io_put_rw_kbuf(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00002666 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002667}
2668
Jens Axboeb63534c2020-06-04 11:28:00 -06002669#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002670static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002671{
2672 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Pavel Begunkov847595d2021-02-04 13:52:06 +00002673 int rw, ret = -ECANCELED;
Jens Axboeb63534c2020-06-04 11:28:00 -06002674 struct iov_iter iter;
Jens Axboeb63534c2020-06-04 11:28:00 -06002675
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002676 /* already prepared */
2677 if (req->async_data)
2678 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002679
2680 switch (req->opcode) {
2681 case IORING_OP_READV:
2682 case IORING_OP_READ_FIXED:
2683 case IORING_OP_READ:
2684 rw = READ;
2685 break;
2686 case IORING_OP_WRITEV:
2687 case IORING_OP_WRITE_FIXED:
2688 case IORING_OP_WRITE:
2689 rw = WRITE;
2690 break;
2691 default:
2692 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2693 req->opcode);
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002694 return false;
Jens Axboeb63534c2020-06-04 11:28:00 -06002695 }
2696
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002697 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2698 if (ret < 0)
2699 return false;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00002700 return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
Jens Axboeb63534c2020-06-04 11:28:00 -06002701}
Jens Axboeb63534c2020-06-04 11:28:00 -06002702#endif
2703
2704static bool io_rw_reissue(struct io_kiocb *req, long res)
2705{
2706#ifdef CONFIG_BLOCK
Pavel Begunkovbf6182b6d2021-01-19 13:32:34 +00002707 umode_t mode;
Jens Axboeb63534c2020-06-04 11:28:00 -06002708 int ret;
2709
Pavel Begunkovbf6182b6d2021-01-19 13:32:34 +00002710 if (res != -EAGAIN && res != -EOPNOTSUPP)
Jens Axboe355afae2020-09-02 09:30:31 -06002711 return false;
Pavel Begunkovbf6182b6d2021-01-19 13:32:34 +00002712 mode = file_inode(req->file)->i_mode;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002713 if (!S_ISBLK(mode) && !S_ISREG(mode))
2714 return false;
2715 if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
Jens Axboeb63534c2020-06-04 11:28:00 -06002716 return false;
2717
Pavel Begunkov55e6ac12021-01-08 20:57:22 +00002718 lockdep_assert_held(&req->ctx->uring_lock);
2719
Jens Axboe28cea78a2020-09-14 10:51:17 -06002720 ret = io_sq_thread_acquire_mm_files(req->ctx, req);
Jens Axboe6d816e02020-08-11 08:04:14 -06002721
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002722 if (!ret && io_resubmit_prep(req)) {
Jens Axboefdee9462020-08-27 16:46:24 -06002723 refcount_inc(&req->refs);
2724 io_queue_async_work(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002725 return true;
Jens Axboefdee9462020-08-27 16:46:24 -06002726 }
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002727 req_set_fail_links(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002728#endif
2729 return false;
2730}
2731
Jens Axboea1d7c392020-06-22 11:09:46 -06002732static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002733 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002734{
2735 if (!io_rw_reissue(req, res))
Pavel Begunkov889fca72021-02-10 00:03:09 +00002736 io_complete_rw_common(&req->rw.kiocb, res, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002737}
2738
2739static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2740{
Jens Axboe9adbd452019-12-20 08:45:55 -07002741 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002742
Pavel Begunkov889fca72021-02-10 00:03:09 +00002743 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002744}
2745
Jens Axboedef596e2019-01-09 08:59:42 -07002746static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2747{
Jens Axboe9adbd452019-12-20 08:45:55 -07002748 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002749
Jens Axboe491381ce2019-10-17 09:20:46 -06002750 if (kiocb->ki_flags & IOCB_WRITE)
2751 kiocb_end_write(req);
Jens Axboedef596e2019-01-09 08:59:42 -07002752
Xiaoguang Wang2d7d6792020-06-16 02:06:37 +08002753 if (res != -EAGAIN && res != req->result)
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002754 req_set_fail_links(req);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002755
2756 WRITE_ONCE(req->result, res);
 2757	/* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002758 smp_wmb();
2759 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002760}
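/*
 * A sketch of the publish/consume pairing between io_complete_rw_iopoll()
 * above and the reap side in io_do_iopoll()/io_iopoll_complete():
 *
 *	completion (driver side)                reaping task
 *	------------------------                ------------
 *	WRITE_ONCE(req->result, res);
 *	smp_wmb();
 *	WRITE_ONCE(req->iopoll_completed, 1);
 *	                                        if (READ_ONCE(req->iopoll_completed))
 *	                                                ... move to done list ...
 *	                                        smp_rmb();
 *	                                        READ_ONCE(req->result);
 *
 * The reaper only trusts ->result after observing ->iopoll_completed == 1,
 * which is why the write barrier must separate the two stores.
 */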
2761
2762/*
2763 * After the iocb has been issued, it's safe to be found on the poll list.
2764 * Adding the kiocb to the list AFTER submission ensures that we don't
 2765	 * find it from an io_iopoll_getevents() thread before the issuer is done
2766 * accessing the kiocb cookie.
2767 */
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002768static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
Jens Axboedef596e2019-01-09 08:59:42 -07002769{
2770 struct io_ring_ctx *ctx = req->ctx;
2771
2772 /*
2773 * Track whether we have multiple files in our lists. This will impact
2774 * how we do polling eventually, not spinning if we're on potentially
2775 * different devices.
2776 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002777 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002778 ctx->poll_multi_file = false;
2779 } else if (!ctx->poll_multi_file) {
2780 struct io_kiocb *list_req;
2781
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002782 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002783 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002784 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002785 ctx->poll_multi_file = true;
2786 }
2787
2788 /*
2789 * For fast devices, IO may have already completed. If it has, add
2790 * it to the front so we find it first.
2791 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002792 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002793 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002794 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002795 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002796
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002797 /*
2798 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
 2799	 * task context or in io worker task context. If the current task context
 2800	 * is the sq thread, we don't need to check whether we should wake it up.
2801 */
2802 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
Jens Axboe534ca6d2020-09-02 13:52:19 -06002803 wq_has_sleeper(&ctx->sq_data->wait))
2804 wake_up(&ctx->sq_data->wait);
Jens Axboedef596e2019-01-09 08:59:42 -07002805}
2806
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002807static inline void io_state_file_put(struct io_submit_state *state)
2808{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002809 if (state->file_refs) {
2810 fput_many(state->file, state->file_refs);
2811 state->file_refs = 0;
2812 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002813}
2814
2815/*
2816 * Get as many references to a file as we have IOs left in this submission,
2817 * assuming most submissions are for one file, or at least that each file
2818 * has more than one submission.
2819 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002820static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002821{
2822 if (!state)
2823 return fget(fd);
2824
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002825 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002826 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002827 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002828 return state->file;
2829 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002830 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002831 }
2832 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002833 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002834 return NULL;
2835
2836 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002837 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002838 return state->file;
2839}
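/*
 * Example of the batching above: with state->ios_left == 8 and every SQE
 * in the batch hitting the same fd, the first lookup does fget_many(fd, 8)
 * and leaves state->file_refs == 7; the next seven submissions only
 * decrement state->file_refs, and io_state_file_put() releases whatever
 * is left when the batch ends.
 */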
2840
Jens Axboe4503b762020-06-01 10:00:27 -06002841static bool io_bdev_nowait(struct block_device *bdev)
2842{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002843 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002844}
2845
Jens Axboe2b188cc2019-01-07 10:46:33 -07002846/*
2847 * If we tracked the file through the SCM inflight mechanism, we could support
2848 * any file. For now, just ensure that anything potentially problematic is done
2849 * inline.
2850 */
Jens Axboeaf197f52020-04-28 13:15:06 -06002851static bool io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002852{
2853 umode_t mode = file_inode(file)->i_mode;
2854
Jens Axboe4503b762020-06-01 10:00:27 -06002855 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002856 if (IS_ENABLED(CONFIG_BLOCK) &&
2857 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002858 return true;
2859 return false;
2860 }
2861 if (S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002862 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002863 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002864 if (IS_ENABLED(CONFIG_BLOCK) &&
2865 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002866 file->f_op != &io_uring_fops)
2867 return true;
2868 return false;
2869 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002870
Jens Axboec5b85622020-06-09 19:23:05 -06002871 /* any ->read/write should understand O_NONBLOCK */
2872 if (file->f_flags & O_NONBLOCK)
2873 return true;
2874
Jens Axboeaf197f52020-04-28 13:15:06 -06002875 if (!(file->f_mode & FMODE_NOWAIT))
2876 return false;
2877
2878 if (rw == READ)
2879 return file->f_op->read_iter != NULL;
2880
2881 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002882}
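/*
 * Illustrative outcomes of the checks above (a sketch, not exhaustive):
 *
 *	- block device, or regular file on a bdev with QUEUE_FLAG_NOWAIT set:
 *	  async-capable, the IO can be attempted inline with IOCB_NOWAIT
 *	- character device or socket: always treated as async-capable
 *	- file opened with O_NONBLOCK: ->read()/->write() are expected to
 *	  return -EAGAIN rather than block, so no punt is needed
 *	- otherwise: FMODE_NOWAIT plus a matching ->read_iter()/->write_iter()
 *	  decide the answer
 */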
2883
Pavel Begunkova88fc402020-09-30 22:57:53 +03002884static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002885{
Jens Axboedef596e2019-01-09 08:59:42 -07002886 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002887 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002888 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002889 unsigned ioprio;
2890 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002891
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002892 if (S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002893 req->flags |= REQ_F_ISREG;
2894
Jens Axboe2b188cc2019-01-07 10:46:33 -07002895 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002896 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002897 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002898 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002899 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002900 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002901 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2902 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2903 if (unlikely(ret))
2904 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002905
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002906 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2907 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2908 req->flags |= REQ_F_NOWAIT;
2909
Jens Axboe2b188cc2019-01-07 10:46:33 -07002910 ioprio = READ_ONCE(sqe->ioprio);
2911 if (ioprio) {
2912 ret = ioprio_check_cap(ioprio);
2913 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002914 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002915
2916 kiocb->ki_ioprio = ioprio;
2917 } else
2918 kiocb->ki_ioprio = get_current_ioprio();
2919
Jens Axboedef596e2019-01-09 08:59:42 -07002920 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002921 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2922 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002923 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002924
Jens Axboedef596e2019-01-09 08:59:42 -07002925 kiocb->ki_flags |= IOCB_HIPRI;
2926 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002927 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002928 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002929 if (kiocb->ki_flags & IOCB_HIPRI)
2930 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002931 kiocb->ki_complete = io_complete_rw;
2932 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002933
Jens Axboe3529d8c2019-12-19 18:24:38 -07002934 req->rw.addr = READ_ONCE(sqe->addr);
2935 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002936 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002937 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002938}
2939
2940static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2941{
2942 switch (ret) {
2943 case -EIOCBQUEUED:
2944 break;
2945 case -ERESTARTSYS:
2946 case -ERESTARTNOINTR:
2947 case -ERESTARTNOHAND:
2948 case -ERESTART_RESTARTBLOCK:
2949 /*
2950 * We can't just restart the syscall, since previously
2951 * submitted sqes may already be in progress. Just fail this
2952 * IO with EINTR.
2953 */
2954 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002955 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002956 default:
2957 kiocb->ki_complete(kiocb, ret, 0);
2958 }
2959}
2960
Jens Axboea1d7c392020-06-22 11:09:46 -06002961static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002962 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002963{
Jens Axboeba042912019-12-25 16:33:42 -07002964 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002965 struct io_async_rw *io = req->async_data;
Jens Axboeba042912019-12-25 16:33:42 -07002966
Jens Axboe227c0c92020-08-13 11:51:40 -06002967 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002968 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002969 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002970 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002971 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002972 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002973 }
2974
Jens Axboeba042912019-12-25 16:33:42 -07002975 if (req->flags & REQ_F_CUR_POS)
2976 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002977 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002978 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002979 else
2980 io_rw_done(kiocb, ret);
2981}
2982
Pavel Begunkov847595d2021-02-04 13:52:06 +00002983static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07002984{
Jens Axboe9adbd452019-12-20 08:45:55 -07002985 struct io_ring_ctx *ctx = req->ctx;
2986 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002987 struct io_mapped_ubuf *imu;
Pavel Begunkov4be1c612020-09-06 00:45:48 +03002988 u16 index, buf_index = req->buf_index;
Jens Axboeedafcce2019-01-09 09:16:05 -07002989 size_t offset;
2990 u64 buf_addr;
2991
Jens Axboeedafcce2019-01-09 09:16:05 -07002992 if (unlikely(buf_index >= ctx->nr_user_bufs))
2993 return -EFAULT;
Jens Axboeedafcce2019-01-09 09:16:05 -07002994 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2995 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07002996 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002997
2998 /* overflow */
2999 if (buf_addr + len < buf_addr)
3000 return -EFAULT;
3001 /* not inside the mapped region */
3002 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
3003 return -EFAULT;
3004
3005 /*
3006 * May not be a start of buffer, set size appropriately
3007 * and advance us to the beginning.
3008 */
3009 offset = buf_addr - imu->ubuf;
3010 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06003011
3012 if (offset) {
3013 /*
3014 * Don't use iov_iter_advance() here, as it's really slow for
3015 * using the latter parts of a big fixed buffer - it iterates
3016 * over each segment manually. We can cheat a bit here, because
3017 * we know that:
3018 *
3019 * 1) it's a BVEC iter, we set it up
3020 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3021 * first and last bvec
3022 *
3023 * So just find our index, and adjust the iterator afterwards.
3024 * If the offset is within the first bvec (or the whole first
3025 * bvec, just use iov_iter_advance(). This makes it easier
3026 * since we can just skip the first segment, which may not
3027 * be PAGE_SIZE aligned.
3028 */
3029 const struct bio_vec *bvec = imu->bvec;
3030
3031 if (offset <= bvec->bv_len) {
3032 iov_iter_advance(iter, offset);
3033 } else {
3034 unsigned long seg_skip;
3035
3036 /* skip first vec */
3037 offset -= bvec->bv_len;
3038 seg_skip = 1 + (offset >> PAGE_SHIFT);
3039
3040 iter->bvec = bvec + seg_skip;
3041 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02003042 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003043 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003044 }
3045 }
3046
Pavel Begunkov847595d2021-02-04 13:52:06 +00003047 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07003048}
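/*
 * Worked example for the bvec fast-forward in io_import_fixed() above,
 * assuming 4K pages and a page-aligned registered buffer.  A request at
 * imu->ubuf + 12388 (3 pages + 100 bytes) gives offset == 12388, which
 * exceeds the first bvec's 4096 bytes, so the skip path runs:
 *
 *	offset -= 4096;                          12388 -> 8292
 *	seg_skip = 1 + (8292 >> PAGE_SHIFT);     == 3
 *	iter->bvec += 3;
 *	iter->iov_offset = 8292 & ~PAGE_MASK;    == 100
 *
 * i.e. the iterator lands on page index 3 at byte offset 100 without
 * walking the intermediate segments.
 */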
3049
Jens Axboebcda7ba2020-02-23 16:42:51 -07003050static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3051{
3052 if (needs_lock)
3053 mutex_unlock(&ctx->uring_lock);
3054}
3055
3056static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3057{
3058 /*
3059 * "Normal" inline submissions always hold the uring_lock, since we
3060 * grab it from the system call. Same is true for the SQPOLL offload.
3061 * The only exception is when we've detached the request and issue it
 3062	 * from an async worker thread; grab the lock in that case.
3063 */
3064 if (needs_lock)
3065 mutex_lock(&ctx->uring_lock);
3066}
3067
3068static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3069 int bgid, struct io_buffer *kbuf,
3070 bool needs_lock)
3071{
3072 struct io_buffer *head;
3073
3074 if (req->flags & REQ_F_BUFFER_SELECTED)
3075 return kbuf;
3076
3077 io_ring_submit_lock(req->ctx, needs_lock);
3078
3079 lockdep_assert_held(&req->ctx->uring_lock);
3080
3081 head = idr_find(&req->ctx->io_buffer_idr, bgid);
3082 if (head) {
3083 if (!list_empty(&head->list)) {
3084 kbuf = list_last_entry(&head->list, struct io_buffer,
3085 list);
3086 list_del(&kbuf->list);
3087 } else {
3088 kbuf = head;
3089 idr_remove(&req->ctx->io_buffer_idr, bgid);
3090 }
3091 if (*len > kbuf->len)
3092 *len = kbuf->len;
3093 } else {
3094 kbuf = ERR_PTR(-ENOBUFS);
3095 }
3096
3097 io_ring_submit_unlock(req->ctx, needs_lock);
3098
3099 return kbuf;
3100}
3101
Jens Axboe4d954c22020-02-27 07:31:19 -07003102static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3103 bool needs_lock)
3104{
3105 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003106 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07003107
3108 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003109 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07003110 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3111 if (IS_ERR(kbuf))
3112 return kbuf;
3113 req->rw.addr = (u64) (unsigned long) kbuf;
3114 req->flags |= REQ_F_BUFFER_SELECTED;
3115 return u64_to_user_ptr(kbuf->addr);
3116}
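/*
 * Rough userspace-side sketch of the provided-buffer flow served by
 * io_rw_buffer_select() (liburing-style calls; BGID, NR_BUFS, BUF_LEN and
 * sockfd are illustrative, not taken from this file):
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, BUF_LEN, NR_BUFS, BGID, 0);
 *	// submit + wait, then queue the actual IO:
 *	io_uring_prep_recv(sqe, sockfd, NULL, BUF_LEN, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *	// on completion:
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 * The bid reported in cqe->flags is the kbuf->bid packed by io_put_kbuf().
 */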
3117
3118#ifdef CONFIG_COMPAT
3119static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3120 bool needs_lock)
3121{
3122 struct compat_iovec __user *uiov;
3123 compat_ssize_t clen;
3124 void __user *buf;
3125 ssize_t len;
3126
3127 uiov = u64_to_user_ptr(req->rw.addr);
3128 if (!access_ok(uiov, sizeof(*uiov)))
3129 return -EFAULT;
3130 if (__get_user(clen, &uiov->iov_len))
3131 return -EFAULT;
3132 if (clen < 0)
3133 return -EINVAL;
3134
3135 len = clen;
3136 buf = io_rw_buffer_select(req, &len, needs_lock);
3137 if (IS_ERR(buf))
3138 return PTR_ERR(buf);
3139 iov[0].iov_base = buf;
3140 iov[0].iov_len = (compat_size_t) len;
3141 return 0;
3142}
3143#endif
3144
3145static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3146 bool needs_lock)
3147{
3148 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3149 void __user *buf;
3150 ssize_t len;
3151
3152 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3153 return -EFAULT;
3154
3155 len = iov[0].iov_len;
3156 if (len < 0)
3157 return -EINVAL;
3158 buf = io_rw_buffer_select(req, &len, needs_lock);
3159 if (IS_ERR(buf))
3160 return PTR_ERR(buf);
3161 iov[0].iov_base = buf;
3162 iov[0].iov_len = len;
3163 return 0;
3164}
3165
3166static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3167 bool needs_lock)
3168{
Jens Axboedddb3e22020-06-04 11:27:01 -06003169 if (req->flags & REQ_F_BUFFER_SELECTED) {
3170 struct io_buffer *kbuf;
3171
3172 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3173 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3174 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003175 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003176 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00003177 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07003178 return -EINVAL;
3179
3180#ifdef CONFIG_COMPAT
3181 if (req->ctx->compat)
3182 return io_compat_import(req, iov, needs_lock);
3183#endif
3184
3185 return __io_iov_buffer_select(req, iov, needs_lock);
3186}
3187
Pavel Begunkov847595d2021-02-04 13:52:06 +00003188static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3189 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003190{
Jens Axboe9adbd452019-12-20 08:45:55 -07003191 void __user *buf = u64_to_user_ptr(req->rw.addr);
3192 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003193 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003194 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003195
Pavel Begunkov7d009162019-11-25 23:14:40 +03003196 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003197 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003198 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003199 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003200
Jens Axboebcda7ba2020-02-23 16:42:51 -07003201 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003202 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003203 return -EINVAL;
3204
Jens Axboe3a6820f2019-12-22 15:19:35 -07003205 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003206 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003207 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003208 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003209 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003210 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003211 }
3212
Jens Axboe3a6820f2019-12-22 15:19:35 -07003213 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3214 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003215 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003216 }
3217
Jens Axboe4d954c22020-02-27 07:31:19 -07003218 if (req->flags & REQ_F_BUFFER_SELECT) {
3219 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003220 if (!ret)
3221 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003222 *iovec = NULL;
3223 return ret;
3224 }
3225
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003226 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3227 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003228}
3229
Jens Axboe0fef9482020-08-26 10:36:20 -06003230static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3231{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003232 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003233}
3234
Jens Axboe32960612019-09-23 11:05:34 -06003235/*
3236 * For files that don't have ->read_iter() and ->write_iter(), handle them
3237 * by looping over ->read() or ->write() manually.
3238 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003239static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003240{
Jens Axboe4017eb92020-10-22 14:14:12 -06003241 struct kiocb *kiocb = &req->rw.kiocb;
3242 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003243 ssize_t ret = 0;
3244
3245 /*
 3246	 * We don't support polled IO through this interface, and we can't
3247 * support non-blocking either. For the latter, this just causes
3248 * the kiocb to be handled from an async context.
3249 */
3250 if (kiocb->ki_flags & IOCB_HIPRI)
3251 return -EOPNOTSUPP;
3252 if (kiocb->ki_flags & IOCB_NOWAIT)
3253 return -EAGAIN;
3254
3255 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003256 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003257 ssize_t nr;
3258
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003259 if (!iov_iter_is_bvec(iter)) {
3260 iovec = iov_iter_iovec(iter);
3261 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003262 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3263 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003264 }
3265
Jens Axboe32960612019-09-23 11:05:34 -06003266 if (rw == READ) {
3267 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003268 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003269 } else {
3270 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003271 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003272 }
3273
3274 if (nr < 0) {
3275 if (!ret)
3276 ret = nr;
3277 break;
3278 }
3279 ret += nr;
3280 if (nr != iovec.iov_len)
3281 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003282 req->rw.len -= nr;
3283 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003284 iov_iter_advance(iter, nr);
3285 }
3286
3287 return ret;
3288}
3289
Jens Axboeff6165b2020-08-13 09:47:43 -06003290static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3291 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003292{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003293 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003294
Jens Axboeff6165b2020-08-13 09:47:43 -06003295 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003296 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003297 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003298 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003299 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003300 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003301 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003302 unsigned iov_off = 0;
3303
3304 rw->iter.iov = rw->fast_iov;
3305 if (iter->iov != fast_iov) {
3306 iov_off = iter->iov - fast_iov;
3307 rw->iter.iov += iov_off;
3308 }
3309 if (rw->fast_iov != fast_iov)
3310 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003311 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003312 } else {
3313 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003314 }
3315}
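/*
 * Sketch of the fast_iov fixup above: suppose the import used the caller's
 * on-stack fast_iov and the iterator already sits on its second segment,
 * so iter->iov == fast_iov + 1.  The persistent copy must not reference
 * the submitter's stack, hence:
 *
 *	iov_off      = iter->iov - fast_iov;     == 1
 *	rw->iter.iov = rw->fast_iov + iov_off;   now points into req storage
 *	memcpy(rw->fast_iov + 1, fast_iov + 1,
 *	       sizeof(struct iovec) * iter->nr_segs);
 *
 * after which the async retry can keep iterating once the original stack
 * frame is gone.
 */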
3316
Jens Axboee8c2bc12020-08-15 18:44:09 -07003317static inline int __io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003318{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003319 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3320 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3321 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003322}
3323
Jens Axboee8c2bc12020-08-15 18:44:09 -07003324static int io_alloc_async_data(struct io_kiocb *req)
Jens Axboef67676d2019-12-02 11:03:47 -07003325{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003326 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboed3656342019-12-18 09:50:26 -07003327 return 0;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003328
Jens Axboee8c2bc12020-08-15 18:44:09 -07003329 return __io_alloc_async_data(req);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003330}
3331
Jens Axboeff6165b2020-08-13 09:47:43 -06003332static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3333 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003334 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003335{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003336 if (!force && !io_op_defs[req->opcode].needs_async_data)
Jens Axboe74566df2020-01-13 19:23:24 -07003337 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003338 if (!req->async_data) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003339 if (__io_alloc_async_data(req)) {
3340 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003341 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003342 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003343
Jens Axboeff6165b2020-08-13 09:47:43 -06003344 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003345 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003346 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003347}
3348
Pavel Begunkov73debe62020-09-30 22:57:54 +03003349static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003350{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003351 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003352 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003353 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003354
Pavel Begunkov2846c482020-11-07 13:16:27 +00003355 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003356 if (unlikely(ret < 0))
3357 return ret;
3358
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003359 iorw->bytes_done = 0;
3360 iorw->free_iovec = iov;
3361 if (iov)
3362 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003363 return 0;
3364}
3365
Pavel Begunkov73debe62020-09-30 22:57:54 +03003366static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003367{
3368 ssize_t ret;
3369
Pavel Begunkova88fc402020-09-30 22:57:53 +03003370 ret = io_prep_rw(req, sqe);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003371 if (ret)
3372 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07003373
Jens Axboe3529d8c2019-12-19 18:24:38 -07003374 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3375 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07003376
Pavel Begunkov5f798be2020-02-08 13:28:02 +03003377 /* either don't need iovec imported or already have it */
Pavel Begunkov2d199892020-09-30 22:57:35 +03003378 if (!req->async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003379 return 0;
Pavel Begunkov73debe62020-09-30 22:57:54 +03003380 return io_rw_prep_async(req, READ);
Jens Axboef67676d2019-12-02 11:03:47 -07003381}
3382
Jens Axboec1dd91d2020-08-03 16:43:59 -06003383/*
3384 * This is our waitqueue callback handler, registered through lock_page_async()
 3385 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3386 * This gets called when the page is unlocked, and we generally expect that to
3387 * happen when the page IO is completed and the page is now uptodate. This will
3388 * queue a task_work based retry of the operation, attempting to copy the data
3389 * again. If the latter fails because the page was NOT uptodate, then we will
3390 * do a thread based blocking retry of the operation. That's the unexpected
3391 * slow path.
3392 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003393static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3394 int sync, void *arg)
3395{
3396 struct wait_page_queue *wpq;
3397 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003398 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003399 int ret;
3400
3401 wpq = container_of(wait, struct wait_page_queue, wait);
3402
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003403 if (!wake_page_match(wpq, key))
3404 return 0;
3405
Hao Xuc8d317a2020-09-29 20:00:45 +08003406 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003407 list_del_init(&wait->entry);
3408
Pavel Begunkove7375122020-07-12 20:42:04 +03003409 init_task_work(&req->task_work, io_req_task_submit);
Jens Axboe6d816e02020-08-11 08:04:14 -06003410 percpu_ref_get(&req->ctx->refs);
3411
Jens Axboebcf5a062020-05-22 09:24:42 -06003412 /* submit ref gets dropped, acquire a new one */
3413 refcount_inc(&req->refs);
Jens Axboe355fb9e2020-10-22 20:19:35 -06003414 ret = io_req_task_work_add(req);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00003415 if (unlikely(ret))
3416 io_req_task_work_add_fallback(req, io_req_task_cancel);
Jens Axboebcf5a062020-05-22 09:24:42 -06003417 return 1;
3418}
3419
Jens Axboec1dd91d2020-08-03 16:43:59 -06003420/*
3421 * This controls whether a given IO request should be armed for async page
3422 * based retry. If we return false here, the request is handed to the async
3423 * worker threads for retry. If we're doing buffered reads on a regular file,
3424 * we prepare a private wait_page_queue entry and retry the operation. This
3425 * will either succeed because the page is now uptodate and unlocked, or it
3426 * will register a callback when the page is unlocked at IO completion. Through
3427 * that callback, io_uring uses task_work to setup a retry of the operation.
3428 * That retry will attempt the buffered read again. The retry will generally
3429 * succeed, or in rare cases where it fails, we then fall back to using the
3430 * async worker threads for a blocking retry.
3431 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003432static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003433{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003434 struct io_async_rw *rw = req->async_data;
3435 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003436 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003437
3438 /* never retry for NOWAIT, we just complete with -EAGAIN */
3439 if (req->flags & REQ_F_NOWAIT)
3440 return false;
3441
Jens Axboe227c0c92020-08-13 11:51:40 -06003442 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003443 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003444 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003445
Jens Axboebcf5a062020-05-22 09:24:42 -06003446 /*
3447 * just use poll if we can, and don't attempt if the fs doesn't
3448 * support callback based unlocks
3449 */
3450 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3451 return false;
3452
Jens Axboe3b2a4432020-08-16 10:58:43 -07003453 wait->wait.func = io_async_buf_func;
3454 wait->wait.private = req;
3455 wait->wait.flags = 0;
3456 INIT_LIST_HEAD(&wait->wait.entry);
3457 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003458 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003459 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003460 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003461}
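/*
 * Putting the pieces above together, the buffered-read retry path looks
 * roughly like this (a sketch of the flow, not literal code):
 *
 *	io_read()
 *	    io_iter_do_read()       -> -EAGAIN (page locked / not uptodate)
 *	    io_rw_should_retry()    -> arms kiocb->ki_waitq, sets IOCB_WAITQ
 *	    io_iter_do_read()       -> -EIOCBQUEUED, waitqueue callback armed
 *	... page IO completes and the page is unlocked ...
 *	io_async_buf_func()         -> queues io_req_task_submit() as task_work
 *	io_req_task_submit()        -> re-issues the read, which now succeeds
 */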
3462
3463static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3464{
3465 if (req->file->f_op->read_iter)
3466 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003467 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003468 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003469 else
3470 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003471}
3472
Pavel Begunkov889fca72021-02-10 00:03:09 +00003473static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003474{
3475 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003476 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003477 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003478 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003479 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003480 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003481
Pavel Begunkov2846c482020-11-07 13:16:27 +00003482 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003483 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003484 iovec = NULL;
3485 } else {
3486 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3487 if (ret < 0)
3488 return ret;
3489 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003490 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003491 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003492
Jens Axboefd6c2e42019-12-18 12:19:41 -07003493 /* Ensure we clear previously set non-block flag */
3494 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003495 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003496 else
3497 kiocb->ki_flags |= IOCB_NOWAIT;
3498
Pavel Begunkov24c74672020-06-21 13:09:51 +03003499 /* If the file doesn't support async, just async punt */
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003500 if (force_nonblock && !io_file_supports_async(req->file, READ)) {
3501 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003502 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003503 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003504
Pavel Begunkov632546c2020-11-07 13:16:26 +00003505 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003506 if (unlikely(ret)) {
3507 kfree(iovec);
3508 return ret;
3509 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003510
Jens Axboe227c0c92020-08-13 11:51:40 -06003511 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003512
Pavel Begunkov57cd6572021-02-01 18:59:56 +00003513 if (ret == -EIOCBQUEUED) {
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003514		/* it's faster to check here than to delegate to kfree */
3515 if (iovec)
3516 kfree(iovec);
3517 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003518 } else if (ret == -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003519 /* IOPOLL retry should happen for io-wq threads */
3520 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003521 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003522 /* no retry on NONBLOCK nor RWF_NOWAIT */
3523 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003524 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003525 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003526 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003527 ret = 0;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003528 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003529 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003530 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003531 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003532 }
3533
Jens Axboe227c0c92020-08-13 11:51:40 -06003534 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003535 if (ret2)
3536 return ret2;
3537
Jens Axboee8c2bc12020-08-15 18:44:09 -07003538 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003539 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003540 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003541
Pavel Begunkovb23df912021-02-04 13:52:04 +00003542 do {
3543 io_size -= ret;
3544 rw->bytes_done += ret;
3545 /* if we can retry, do so with the callbacks armed */
3546 if (!io_rw_should_retry(req)) {
3547 kiocb->ki_flags &= ~IOCB_WAITQ;
3548 return -EAGAIN;
3549 }
3550
3551 /*
3552 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3553 * we get -EIOCBQUEUED, then we'll get a notification when the
3554 * desired page gets unlocked. We can also get a partial read
3555 * here, and if we do, then just retry at the new offset.
3556 */
3557 ret = io_iter_do_read(req, iter);
3558 if (ret == -EIOCBQUEUED)
3559 return 0;
3560 /* we got some bytes, but not all. retry. */
3561 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003562done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003563 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003564 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003565}
3566
Pavel Begunkov73debe62020-09-30 22:57:54 +03003567static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003568{
3569 ssize_t ret;
3570
Pavel Begunkova88fc402020-09-30 22:57:53 +03003571 ret = io_prep_rw(req, sqe);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003572 if (ret)
3573 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07003574
Jens Axboe3529d8c2019-12-19 18:24:38 -07003575 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3576 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07003577
Pavel Begunkov5f798be2020-02-08 13:28:02 +03003578 /* either don't need iovec imported or already have it */
Pavel Begunkov2d199892020-09-30 22:57:35 +03003579 if (!req->async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003580 return 0;
Pavel Begunkov73debe62020-09-30 22:57:54 +03003581 return io_rw_prep_async(req, WRITE);
Jens Axboef67676d2019-12-02 11:03:47 -07003582}
3583
Pavel Begunkov889fca72021-02-10 00:03:09 +00003584static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003585{
3586 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003587 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003588 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003589 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003590 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003591 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003592
Pavel Begunkov2846c482020-11-07 13:16:27 +00003593 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003594 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003595 iovec = NULL;
3596 } else {
3597 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3598 if (ret < 0)
3599 return ret;
3600 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003601 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003602 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003603
Jens Axboefd6c2e42019-12-18 12:19:41 -07003604 /* Ensure we clear previously set non-block flag */
3605 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003606 kiocb->ki_flags &= ~IOCB_NOWAIT;
3607 else
3608 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003609
Pavel Begunkov24c74672020-06-21 13:09:51 +03003610 /* If the file doesn't support async, just async punt */
Jens Axboeaf197f52020-04-28 13:15:06 -06003611 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003612 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003613
Jens Axboe10d59342019-12-09 20:16:22 -07003614 /* file path doesn't support NOWAIT for non-direct_IO */
3615 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3616 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003617 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003618
Pavel Begunkov632546c2020-11-07 13:16:26 +00003619 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003620 if (unlikely(ret))
3621 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003622
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003623 /*
3624 * Open-code file_start_write here to grab freeze protection,
3625 * which will be released by another thread in
3626 * io_complete_rw(). Fool lockdep by telling it the lock got
3627 * released so that it doesn't complain about the held lock when
3628 * we return to userspace.
3629 */
3630 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003631 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003632 __sb_writers_release(file_inode(req->file)->i_sb,
3633 SB_FREEZE_WRITE);
3634 }
3635 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003636
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003637 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003638 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003639 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003640 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003641 else
3642 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003643
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003644 /*
3645 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3646 * retry them without IOCB_NOWAIT.
3647 */
3648 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3649 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003650	/* no retry on NONBLOCK or RWF_NOWAIT */
3651 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003652 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003653 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003654 /* IOPOLL retry should happen for io-wq threads */
3655 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3656 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003657done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003658 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003659 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003660copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003661 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003662 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003663 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003664 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003665 }
Jens Axboe31b51512019-01-18 22:56:34 -07003666out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003667 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003668 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003669 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003670 return ret;
3671}
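
/*
 * Editorial example (userspace side, not part of this file): the io_write()
 * path above ends up servicing IORING_OP_WRITEV submissions; inline attempts
 * that would block are re-armed through io_setup_async_rw() and finished from
 * a worker, invisibly to the submitter.  A rough sketch using liburing's
 * io_uring_prep_writev() helper (assumed available); the fd, offset and
 * payload are hypothetical.  The payload must stay valid until the CQE
 * arrives; the iovec itself only needs to survive the submit call.
 */
#include <liburing.h>
#include <sys/uio.h>

static int queue_writev(struct io_uring *ring, int fd, off_t off)
{
	static char payload[] = "hello io_uring\n";
	struct iovec vec = { .iov_base = payload, .iov_len = sizeof(payload) - 1 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;	/* SQ ring full: submit what's queued and retry */
	io_uring_prep_writev(sqe, fd, &vec, 1, off);
	return io_uring_submit(ring);	/* returns number of SQEs submitted */
}
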
3672
Jens Axboe80a261f2020-09-28 14:23:58 -06003673static int io_renameat_prep(struct io_kiocb *req,
3674 const struct io_uring_sqe *sqe)
3675{
3676 struct io_rename *ren = &req->rename;
3677 const char __user *oldf, *newf;
3678
3679 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3680 return -EBADF;
3681
3682 ren->old_dfd = READ_ONCE(sqe->fd);
3683 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3684 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3685 ren->new_dfd = READ_ONCE(sqe->len);
3686 ren->flags = READ_ONCE(sqe->rename_flags);
3687
3688 ren->oldpath = getname(oldf);
3689 if (IS_ERR(ren->oldpath))
3690 return PTR_ERR(ren->oldpath);
3691
3692 ren->newpath = getname(newf);
3693 if (IS_ERR(ren->newpath)) {
3694 putname(ren->oldpath);
3695 return PTR_ERR(ren->newpath);
3696 }
3697
3698 req->flags |= REQ_F_NEED_CLEANUP;
3699 return 0;
3700}
3701
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003702static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003703{
3704 struct io_rename *ren = &req->rename;
3705 int ret;
3706
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003707 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003708 return -EAGAIN;
3709
3710 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3711 ren->newpath, ren->flags);
3712
3713 req->flags &= ~REQ_F_NEED_CLEANUP;
3714 if (ret < 0)
3715 req_set_fail_links(req);
3716 io_req_complete(req, ret);
3717 return 0;
3718}
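
/*
 * Editorial example (userspace side, not part of this file): io_renameat_prep()
 * above maps sqe->fd/addr to the old dirfd/path and sqe->len/addr2 to the new
 * dirfd/path, with sqe->rename_flags passed straight to do_renameat2().  Since
 * io_renameat() returns -EAGAIN for any nonblocking issue, the rename itself
 * always runs from the async worker.  Sketch assuming liburing's
 * io_uring_prep_renameat() helper; the paths are hypothetical.
 */
#include <liburing.h>
#include <fcntl.h>	/* AT_FDCWD */

static int queue_rename(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt", AT_FDCWD, "new.txt", 0);
	return io_uring_submit(ring);
}
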
3719
Jens Axboe14a11432020-09-28 14:27:37 -06003720static int io_unlinkat_prep(struct io_kiocb *req,
3721 const struct io_uring_sqe *sqe)
3722{
3723 struct io_unlink *un = &req->unlink;
3724 const char __user *fname;
3725
3726 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3727 return -EBADF;
3728
3729 un->dfd = READ_ONCE(sqe->fd);
3730
3731 un->flags = READ_ONCE(sqe->unlink_flags);
3732 if (un->flags & ~AT_REMOVEDIR)
3733 return -EINVAL;
3734
3735 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3736 un->filename = getname(fname);
3737 if (IS_ERR(un->filename))
3738 return PTR_ERR(un->filename);
3739
3740 req->flags |= REQ_F_NEED_CLEANUP;
3741 return 0;
3742}
3743
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003744static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003745{
3746 struct io_unlink *un = &req->unlink;
3747 int ret;
3748
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003749 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003750 return -EAGAIN;
3751
3752 if (un->flags & AT_REMOVEDIR)
3753 ret = do_rmdir(un->dfd, un->filename);
3754 else
3755 ret = do_unlinkat(un->dfd, un->filename);
3756
3757 req->flags &= ~REQ_F_NEED_CLEANUP;
3758 if (ret < 0)
3759 req_set_fail_links(req);
3760 io_req_complete(req, ret);
3761 return 0;
3762}
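
/*
 * Editorial example (userspace side, not part of this file): as
 * io_unlinkat_prep() above shows, the only flag accepted in sqe->unlink_flags
 * is AT_REMOVEDIR, which selects rmdir vs unlink semantics.  Sketch assuming
 * liburing's io_uring_prep_unlinkat() helper; the path is hypothetical.
 */
#include <liburing.h>
#include <stdbool.h>
#include <fcntl.h>	/* AT_FDCWD, AT_REMOVEDIR */

static int queue_unlink(struct io_uring *ring, const char *path, bool is_dir)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_unlinkat(sqe, AT_FDCWD, path, is_dir ? AT_REMOVEDIR : 0);
	return io_uring_submit(ring);
}
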
3763
Jens Axboe36f4fa62020-09-05 11:14:22 -06003764static int io_shutdown_prep(struct io_kiocb *req,
3765 const struct io_uring_sqe *sqe)
3766{
3767#if defined(CONFIG_NET)
3768 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3769 return -EINVAL;
3770 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3771 sqe->buf_index)
3772 return -EINVAL;
3773
3774 req->shutdown.how = READ_ONCE(sqe->len);
3775 return 0;
3776#else
3777 return -EOPNOTSUPP;
3778#endif
3779}
3780
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003781static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003782{
3783#if defined(CONFIG_NET)
3784 struct socket *sock;
3785 int ret;
3786
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003787 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003788 return -EAGAIN;
3789
Linus Torvalds48aba792020-12-16 12:44:05 -08003790 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003791 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003792 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003793
3794 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003795 if (ret < 0)
3796 req_set_fail_links(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003797 io_req_complete(req, ret);
3798 return 0;
3799#else
3800 return -EOPNOTSUPP;
3801#endif
3802}
3803
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003804static int __io_splice_prep(struct io_kiocb *req,
3805 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003806{
3807 struct io_splice* sp = &req->splice;
3808 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003809
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003810 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3811 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003812
3813 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003814 sp->len = READ_ONCE(sqe->len);
3815 sp->flags = READ_ONCE(sqe->splice_flags);
3816
3817 if (unlikely(sp->flags & ~valid_flags))
3818 return -EINVAL;
3819
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003820 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3821 (sp->flags & SPLICE_F_FD_IN_FIXED));
3822 if (!sp->file_in)
3823 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003824 req->flags |= REQ_F_NEED_CLEANUP;
3825
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003826 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3827 /*
3828		 * Splice operations will be punted async, and we need to modify
3829		 * io_wq_work.flags here, so initialize io_wq_work first.
3830 */
3831 io_req_init_async(req);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003832 req->work.flags |= IO_WQ_WORK_UNBOUND;
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003833 }
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003834
3835 return 0;
3836}
3837
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003838static int io_tee_prep(struct io_kiocb *req,
3839 const struct io_uring_sqe *sqe)
3840{
3841 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3842 return -EINVAL;
3843 return __io_splice_prep(req, sqe);
3844}
3845
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003846static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003847{
3848 struct io_splice *sp = &req->splice;
3849 struct file *in = sp->file_in;
3850 struct file *out = sp->file_out;
3851 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3852 long ret = 0;
3853
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003854 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003855 return -EAGAIN;
3856 if (sp->len)
3857 ret = do_tee(in, out, sp->len, flags);
3858
3859 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3860 req->flags &= ~REQ_F_NEED_CLEANUP;
3861
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003862 if (ret != sp->len)
3863 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003864 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003865 return 0;
3866}
3867
3868static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3869{
3870	struct io_splice *sp = &req->splice;
3871
3872 sp->off_in = READ_ONCE(sqe->splice_off_in);
3873 sp->off_out = READ_ONCE(sqe->off);
3874 return __io_splice_prep(req, sqe);
3875}
3876
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003877static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003878{
3879 struct io_splice *sp = &req->splice;
3880 struct file *in = sp->file_in;
3881 struct file *out = sp->file_out;
3882 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3883 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003884 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003885
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003886 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003887 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003888
3889 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3890 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003891
Jens Axboe948a7742020-05-17 14:21:38 -06003892 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003893 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003894
3895 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3896 req->flags &= ~REQ_F_NEED_CLEANUP;
3897
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003898 if (ret != sp->len)
3899 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003900 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003901 return 0;
3902}
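
/*
 * Editorial example (userspace side, not part of this file): io_splice_prep()
 * reads the input fd from sqe->splice_fd_in and the two offsets from
 * splice_off_in/off, where -1 means "use the file's own position" (mandatory
 * for pipes).  Sketch of a pipe-to-file splice assuming liburing's
 * io_uring_prep_splice() helper; both fds are hypothetical.
 */
#include <liburing.h>

static int queue_splice(struct io_uring *ring, int pipe_rd, int file_fd,
			unsigned int nbytes)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* off_in = -1: consume from the pipe; off_out = 0: write at file offset 0 */
	io_uring_prep_splice(sqe, pipe_rd, -1, file_fd, 0, nbytes, 0);
	return io_uring_submit(ring);
}
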
3903
Jens Axboe2b188cc2019-01-07 10:46:33 -07003904/*
3905 * IORING_OP_NOP just posts a completion event, nothing else.
3906 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003907static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003908{
3909 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003910
Jens Axboedef596e2019-01-09 08:59:42 -07003911 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3912 return -EINVAL;
3913
Pavel Begunkov889fca72021-02-10 00:03:09 +00003914 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003915 return 0;
3916}
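
/*
 * Editorial example (userspace side, not part of this file): IORING_OP_NOP as
 * handled by io_nop() above is the smallest possible round trip through the
 * ring, handy for testing setup.  A minimal, self-contained sketch using
 * liburing; error handling is abbreviated and the queue depth is arbitrary.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("nop completed, res=%d\n", cqe->res);	/* expect 0 */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
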
3917
Jens Axboe3529d8c2019-12-19 18:24:38 -07003918static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003919{
Jens Axboe6b063142019-01-10 22:13:58 -07003920 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003921
Jens Axboe09bb8392019-03-13 12:39:28 -06003922 if (!req->file)
3923 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003924
Jens Axboe6b063142019-01-10 22:13:58 -07003925 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003926 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003927 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003928 return -EINVAL;
3929
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003930 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3931 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3932 return -EINVAL;
3933
3934 req->sync.off = READ_ONCE(sqe->off);
3935 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003936 return 0;
3937}
3938
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003939static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003940{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003941 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003942 int ret;
3943
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003944 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003945 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003946 return -EAGAIN;
3947
Jens Axboe9adbd452019-12-20 08:45:55 -07003948 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003949 end > 0 ? end : LLONG_MAX,
3950 req->sync.flags & IORING_FSYNC_DATASYNC);
3951 if (ret < 0)
3952 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003953 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003954 return 0;
3955}
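
/*
 * Editorial example (userspace side, not part of this file): io_prep_fsync()
 * above only accepts IORING_FSYNC_DATASYNC in sqe->fsync_flags, and sqe->off
 * plus sqe->len can restrict the range handed to vfs_fsync_range().  Sketch
 * assuming liburing's io_uring_prep_fsync() helper; the fd is hypothetical.
 */
#include <liburing.h>

static int queue_fdatasync(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	return io_uring_submit(ring);
}
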
3956
Jens Axboed63d1b52019-12-10 10:38:56 -07003957static int io_fallocate_prep(struct io_kiocb *req,
3958 const struct io_uring_sqe *sqe)
3959{
3960 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3961 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003962 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3963 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003964
3965 req->sync.off = READ_ONCE(sqe->off);
3966 req->sync.len = READ_ONCE(sqe->addr);
3967 req->sync.mode = READ_ONCE(sqe->len);
3968 return 0;
3969}
3970
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003971static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003972{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003973 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003974
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003975	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003976 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003977 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003978 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3979 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003980 if (ret < 0)
3981 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003982 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003983 return 0;
3984}
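
/*
 * Editorial example (userspace side, not part of this file): note the slightly
 * unusual SQE layout read by io_fallocate_prep() above -- the length travels
 * in sqe->addr and the fallocate mode in sqe->len.  liburing's
 * io_uring_prep_fallocate() helper (assumed available) hides that mapping;
 * the fd and length below are hypothetical.
 */
#include <liburing.h>
#include <sys/types.h>

static int queue_fallocate(struct io_uring *ring, int fd, off_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* mode 0: allocate (and zero) the range [0, len) */
	io_uring_prep_fallocate(sqe, fd, 0, 0, len);
	return io_uring_submit(ring);
}
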
3985
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003986static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003987{
Jens Axboef8748882020-01-08 17:47:02 -07003988 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003989 int ret;
3990
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003991 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003992 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003993 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003994 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003995
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003996 /* open.how should be already initialised */
3997 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003998 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003999
Pavel Begunkov25e72d12020-06-03 18:03:23 +03004000 req->open.dfd = READ_ONCE(sqe->fd);
4001 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07004002 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004003 if (IS_ERR(req->open.filename)) {
4004 ret = PTR_ERR(req->open.filename);
4005 req->open.filename = NULL;
4006 return ret;
4007 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06004008 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004009 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004010 return 0;
4011}
4012
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004013static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4014{
4015 u64 flags, mode;
4016
Jens Axboe14587a462020-09-05 11:36:08 -06004017 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06004018 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004019 mode = READ_ONCE(sqe->len);
4020 flags = READ_ONCE(sqe->open_flags);
4021 req->open.how = build_open_how(flags, mode);
4022 return __io_openat_prep(req, sqe);
4023}
4024
Jens Axboecebdb982020-01-08 17:59:24 -07004025static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4026{
4027 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07004028 size_t len;
4029 int ret;
4030
Jens Axboe14587a462020-09-05 11:36:08 -06004031 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06004032 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07004033 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4034 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07004035 if (len < OPEN_HOW_SIZE_VER0)
4036 return -EINVAL;
4037
4038 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4039 len);
4040 if (ret)
4041 return ret;
4042
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004043 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07004044}
4045
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004046static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004047{
4048 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004049 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004050 bool nonblock_set;
4051 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004052 int ret;
4053
Jens Axboecebdb982020-01-08 17:59:24 -07004054 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004055 if (ret)
4056 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004057 nonblock_set = op.open_flag & O_NONBLOCK;
4058 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004059 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004060 /*
4061 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
4062		 * it'll always return -EAGAIN
4063 */
4064 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4065 return -EAGAIN;
4066 op.lookup_flags |= LOOKUP_CACHED;
4067 op.open_flag |= O_NONBLOCK;
4068 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004069
Jens Axboe4022e7a2020-03-19 19:23:18 -06004070 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004071 if (ret < 0)
4072 goto err;
4073
4074 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Jens Axboe3a81fd02020-12-10 12:25:36 -07004075 /* only retry if RESOLVE_CACHED wasn't already set by application */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004076 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
4077 file == ERR_PTR(-EAGAIN)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004078 /*
4079 * We could hang on to this 'fd', but seems like marginal
4080 * gain for something that is now known to be a slower path.
4081 * So just put it, and we'll get a new one when we retry.
4082 */
4083 put_unused_fd(ret);
4084 return -EAGAIN;
4085 }
4086
Jens Axboe15b71ab2019-12-11 11:20:36 -07004087 if (IS_ERR(file)) {
4088 put_unused_fd(ret);
4089 ret = PTR_ERR(file);
4090 } else {
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004091 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
Jens Axboe3a81fd02020-12-10 12:25:36 -07004092 file->f_flags &= ~O_NONBLOCK;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004093 fsnotify_open(file);
4094 fd_install(ret, file);
4095 }
4096err:
4097 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004098 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004099 if (ret < 0)
4100 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004101 io_req_complete(req, ret);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004102 return 0;
4103}
4104
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004105static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07004106{
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004107	return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07004108}
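
/*
 * Editorial example (userspace side, not part of this file): on a nonblocking
 * issue io_openat2() above adds LOOKUP_CACHED/O_NONBLOCK itself and only falls
 * back to the async worker if the cached lookup fails -- unless the
 * application already asked for RESOLVE_CACHED, in which case the -EAGAIN is
 * reported in the CQE instead.  Sketch assuming liburing's
 * io_uring_prep_openat2() helper and headers new enough to define
 * RESOLVE_CACHED; the path is hypothetical.  'how' must stay valid until the
 * SQE is consumed (here: across the submit call, in the non-SQPOLL case).
 */
#include <liburing.h>
#include <linux/openat2.h>	/* struct open_how, RESOLVE_* */
#include <fcntl.h>		/* AT_FDCWD, O_* */

static int queue_cached_open(struct io_uring *ring, const char *path)
{
	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_openat2(sqe, AT_FDCWD, path, &how);
	return io_uring_submit(ring);
}
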
4109
Jens Axboe067524e2020-03-02 16:32:28 -07004110static int io_remove_buffers_prep(struct io_kiocb *req,
4111 const struct io_uring_sqe *sqe)
4112{
4113 struct io_provide_buf *p = &req->pbuf;
4114 u64 tmp;
4115
4116 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
4117 return -EINVAL;
4118
4119 tmp = READ_ONCE(sqe->fd);
4120 if (!tmp || tmp > USHRT_MAX)
4121 return -EINVAL;
4122
4123 memset(p, 0, sizeof(*p));
4124 p->nbufs = tmp;
4125 p->bgid = READ_ONCE(sqe->buf_group);
4126 return 0;
4127}
4128
4129static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4130 int bgid, unsigned nbufs)
4131{
4132 unsigned i = 0;
4133
4134 /* shouldn't happen */
4135 if (!nbufs)
4136 return 0;
4137
4138 /* the head kbuf is the list itself */
4139 while (!list_empty(&buf->list)) {
4140 struct io_buffer *nxt;
4141
4142 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4143 list_del(&nxt->list);
4144 kfree(nxt);
4145 if (++i == nbufs)
4146 return i;
4147 }
4148 i++;
4149 kfree(buf);
4150 idr_remove(&ctx->io_buffer_idr, bgid);
4151
4152 return i;
4153}
4154
Pavel Begunkov889fca72021-02-10 00:03:09 +00004155static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07004156{
4157 struct io_provide_buf *p = &req->pbuf;
4158 struct io_ring_ctx *ctx = req->ctx;
4159 struct io_buffer *head;
4160 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004161 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07004162
4163 io_ring_submit_lock(ctx, !force_nonblock);
4164
4165 lockdep_assert_held(&ctx->uring_lock);
4166
4167 ret = -ENOENT;
4168 head = idr_find(&ctx->io_buffer_idr, p->bgid);
4169 if (head)
4170 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07004171 if (ret < 0)
4172 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004173
4174 /* need to hold the lock to complete IOPOLL requests */
4175 if (ctx->flags & IORING_SETUP_IOPOLL) {
Pavel Begunkov889fca72021-02-10 00:03:09 +00004176 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004177 io_ring_submit_unlock(ctx, !force_nonblock);
4178 } else {
4179 io_ring_submit_unlock(ctx, !force_nonblock);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004180 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004181 }
Jens Axboe067524e2020-03-02 16:32:28 -07004182 return 0;
4183}
4184
Jens Axboeddf0322d2020-02-23 16:41:33 -07004185static int io_provide_buffers_prep(struct io_kiocb *req,
4186 const struct io_uring_sqe *sqe)
4187{
4188 struct io_provide_buf *p = &req->pbuf;
4189 u64 tmp;
4190
4191 if (sqe->ioprio || sqe->rw_flags)
4192 return -EINVAL;
4193
4194 tmp = READ_ONCE(sqe->fd);
4195 if (!tmp || tmp > USHRT_MAX)
4196 return -E2BIG;
4197 p->nbufs = tmp;
4198 p->addr = READ_ONCE(sqe->addr);
4199 p->len = READ_ONCE(sqe->len);
4200
Bijan Mottahedehefe68c12020-06-04 18:01:52 -07004201 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
Jens Axboeddf0322d2020-02-23 16:41:33 -07004202 return -EFAULT;
4203
4204 p->bgid = READ_ONCE(sqe->buf_group);
4205 tmp = READ_ONCE(sqe->off);
4206 if (tmp > USHRT_MAX)
4207 return -E2BIG;
4208 p->bid = tmp;
4209 return 0;
4210}
4211
4212static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4213{
4214 struct io_buffer *buf;
4215 u64 addr = pbuf->addr;
4216 int i, bid = pbuf->bid;
4217
4218 for (i = 0; i < pbuf->nbufs; i++) {
4219 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4220 if (!buf)
4221 break;
4222
4223 buf->addr = addr;
4224 buf->len = pbuf->len;
4225 buf->bid = bid;
4226 addr += pbuf->len;
4227 bid++;
4228 if (!*head) {
4229 INIT_LIST_HEAD(&buf->list);
4230 *head = buf;
4231 } else {
4232 list_add_tail(&buf->list, &(*head)->list);
4233 }
4234 }
4235
4236 return i ? i : -ENOMEM;
4237}
4238
Pavel Begunkov889fca72021-02-10 00:03:09 +00004239static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004240{
4241 struct io_provide_buf *p = &req->pbuf;
4242 struct io_ring_ctx *ctx = req->ctx;
4243 struct io_buffer *head, *list;
4244 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004245 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004246
4247 io_ring_submit_lock(ctx, !force_nonblock);
4248
4249 lockdep_assert_held(&ctx->uring_lock);
4250
4251 list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
4252
4253 ret = io_add_buffers(p, &head);
4254 if (ret < 0)
4255 goto out;
4256
4257 if (!list) {
4258 ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
4259 GFP_KERNEL);
4260 if (ret < 0) {
Jens Axboe067524e2020-03-02 16:32:28 -07004261 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004262 goto out;
4263 }
4264 }
4265out:
Jens Axboeddf0322d2020-02-23 16:41:33 -07004266 if (ret < 0)
4267 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004268
4269 /* need to hold the lock to complete IOPOLL requests */
4270 if (ctx->flags & IORING_SETUP_IOPOLL) {
Pavel Begunkov889fca72021-02-10 00:03:09 +00004271 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004272 io_ring_submit_unlock(ctx, !force_nonblock);
4273 } else {
4274 io_ring_submit_unlock(ctx, !force_nonblock);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004275 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004276 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004277 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004278}
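
/*
 * Editorial example (userspace side, not part of this file):
 * io_provide_buffers_prep() above takes the buffer count in sqe->fd, the base
 * address and per-buffer length in sqe->addr/len, the group id in
 * sqe->buf_group and the starting buffer id in sqe->off.  Sketch assuming
 * liburing's io_uring_prep_provide_buffers() helper; the group id and pool
 * geometry are hypothetical.
 */
#include <liburing.h>
#include <stdlib.h>

#define EXAMPLE_BGID	7	/* hypothetical buffer group id */

static int provide_pool(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	void *pool;

	if (!sqe)
		return -1;
	pool = malloc(8 * 4096);
	if (!pool)
		return -1;
	/* 8 buffers of 4096 bytes each, ids 0..7, in group EXAMPLE_BGID */
	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, EXAMPLE_BGID, 0);
	return io_uring_submit(ring);
}
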
4279
Jens Axboe3e4827b2020-01-08 15:18:09 -07004280static int io_epoll_ctl_prep(struct io_kiocb *req,
4281 const struct io_uring_sqe *sqe)
4282{
4283#if defined(CONFIG_EPOLL)
4284 if (sqe->ioprio || sqe->buf_index)
4285 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004286 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004287 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004288
4289 req->epoll.epfd = READ_ONCE(sqe->fd);
4290 req->epoll.op = READ_ONCE(sqe->len);
4291 req->epoll.fd = READ_ONCE(sqe->off);
4292
4293 if (ep_op_has_event(req->epoll.op)) {
4294 struct epoll_event __user *ev;
4295
4296 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4297 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4298 return -EFAULT;
4299 }
4300
4301 return 0;
4302#else
4303 return -EOPNOTSUPP;
4304#endif
4305}
4306
Pavel Begunkov889fca72021-02-10 00:03:09 +00004307static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004308{
4309#if defined(CONFIG_EPOLL)
4310 struct io_epoll *ie = &req->epoll;
4311 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004312 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004313
4314 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4315 if (force_nonblock && ret == -EAGAIN)
4316 return -EAGAIN;
4317
4318 if (ret < 0)
4319 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004320 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004321 return 0;
4322#else
4323 return -EOPNOTSUPP;
4324#endif
4325}
4326
Jens Axboec1ca7572019-12-25 22:18:28 -07004327static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4328{
4329#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4330 if (sqe->ioprio || sqe->buf_index || sqe->off)
4331 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004332 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4333 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004334
4335 req->madvise.addr = READ_ONCE(sqe->addr);
4336 req->madvise.len = READ_ONCE(sqe->len);
4337 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4338 return 0;
4339#else
4340 return -EOPNOTSUPP;
4341#endif
4342}
4343
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004344static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004345{
4346#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4347 struct io_madvise *ma = &req->madvise;
4348 int ret;
4349
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004350 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004351 return -EAGAIN;
4352
Minchan Kim0726b012020-10-17 16:14:50 -07004353 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004354 if (ret < 0)
4355 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004356 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004357 return 0;
4358#else
4359 return -EOPNOTSUPP;
4360#endif
4361}
4362
Jens Axboe4840e412019-12-25 22:03:45 -07004363static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4364{
4365 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4366 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004367 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4368 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004369
4370 req->fadvise.offset = READ_ONCE(sqe->off);
4371 req->fadvise.len = READ_ONCE(sqe->len);
4372 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4373 return 0;
4374}
4375
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004376static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004377{
4378 struct io_fadvise *fa = &req->fadvise;
4379 int ret;
4380
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004381 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004382 switch (fa->advice) {
4383 case POSIX_FADV_NORMAL:
4384 case POSIX_FADV_RANDOM:
4385 case POSIX_FADV_SEQUENTIAL:
4386 break;
4387 default:
4388 return -EAGAIN;
4389 }
4390 }
Jens Axboe4840e412019-12-25 22:03:45 -07004391
4392 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4393 if (ret < 0)
4394 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004395 io_req_complete(req, ret);
Jens Axboe4840e412019-12-25 22:03:45 -07004396 return 0;
4397}
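
/*
 * Editorial example (userspace side, not part of this file): io_fadvise()
 * above only completes inline on a nonblocking issue for the advice values
 * that don't touch page cache state (NORMAL/RANDOM/SEQUENTIAL); everything
 * else, and all of IORING_OP_MADVISE, goes through the async worker.  Sketch
 * assuming liburing's io_uring_prep_fadvise()/io_uring_prep_madvise() helpers;
 * the fd and mapping are hypothetical.
 */
#include <liburing.h>
#include <sys/mman.h>	/* MADV_* */
#include <fcntl.h>	/* POSIX_FADV_* */

static int queue_advice(struct io_uring *ring, int fd, void *map, size_t len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_fadvise(sqe, fd, 0, len, POSIX_FADV_SEQUENTIAL);

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_madvise(sqe, map, len, MADV_WILLNEED);

	return io_uring_submit(ring);	/* two independent completions */
}
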
4398
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004399static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4400{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004401 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004402 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004403 if (sqe->ioprio || sqe->buf_index)
4404 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004405 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004406 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004407
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004408 req->statx.dfd = READ_ONCE(sqe->fd);
4409 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004410 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004411 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4412 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004413
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004414 return 0;
4415}
4416
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004417static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004418{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004419 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004420 int ret;
4421
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004422 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004423 /* only need file table for an actual valid fd */
4424 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4425 req->flags |= REQ_F_NO_FILE_TABLE;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004426 return -EAGAIN;
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004427 }
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004428
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004429 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4430 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004431
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004432 if (ret < 0)
4433 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004434 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004435 return 0;
4436}
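
/*
 * Editorial example (userspace side, not part of this file): io_statx_prep()
 * above maps sqe->fd/addr/len/addr2/statx_flags onto a do_statx() call.
 * Sketch assuming liburing's io_uring_prep_statx() helper; the path is
 * hypothetical and the statx buffer must stay valid until the CQE arrives.
 */
#include <liburing.h>
#include <fcntl.h>	/* AT_FDCWD */
#include <sys/stat.h>	/* struct statx, STATX_SIZE */

static struct statx example_stx;	/* filled in asynchronously */

static int queue_statx(struct io_uring *ring, const char *path)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_SIZE, &example_stx);
	return io_uring_submit(ring);
}
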
4437
Jens Axboeb5dba592019-12-11 14:02:38 -07004438static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4439{
Jens Axboe14587a462020-09-05 11:36:08 -06004440 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004441 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004442 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4443 sqe->rw_flags || sqe->buf_index)
4444 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004445 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004446 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004447
4448 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004449 return 0;
4450}
4451
Pavel Begunkov889fca72021-02-10 00:03:09 +00004452static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004453{
Jens Axboe9eac1902021-01-19 15:50:37 -07004454 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004455 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004456 struct fdtable *fdt;
4457 struct file *file;
Jens Axboeb5dba592019-12-11 14:02:38 -07004458 int ret;
4459
Jens Axboe9eac1902021-01-19 15:50:37 -07004460 file = NULL;
4461 ret = -EBADF;
4462 spin_lock(&files->file_lock);
4463 fdt = files_fdtable(files);
4464 if (close->fd >= fdt->max_fds) {
4465 spin_unlock(&files->file_lock);
4466 goto err;
4467 }
4468 file = fdt->fd[close->fd];
4469 if (!file) {
4470 spin_unlock(&files->file_lock);
4471 goto err;
4472 }
4473
4474 if (file->f_op == &io_uring_fops) {
4475 spin_unlock(&files->file_lock);
4476 file = NULL;
4477 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004478 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004479
4480 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004481 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004482 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004483 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004484 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004485
Jens Axboe9eac1902021-01-19 15:50:37 -07004486 ret = __close_fd_get_file(close->fd, &file);
4487 spin_unlock(&files->file_lock);
4488 if (ret < 0) {
4489 if (ret == -ENOENT)
4490 ret = -EBADF;
4491 goto err;
4492 }
4493
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004494 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004495 ret = filp_close(file, current->files);
4496err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004497 if (ret < 0)
4498 req_set_fail_links(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004499 if (file)
4500 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004501 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004502 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004503}
4504
Jens Axboe3529d8c2019-12-19 18:24:38 -07004505static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004506{
4507 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004508
4509 if (!req->file)
4510 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004511
4512 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4513 return -EINVAL;
4514 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4515 return -EINVAL;
4516
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004517 req->sync.off = READ_ONCE(sqe->off);
4518 req->sync.len = READ_ONCE(sqe->len);
4519 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004520 return 0;
4521}
4522
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004523static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004524{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004525 int ret;
4526
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004527 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004528 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004529 return -EAGAIN;
4530
Jens Axboe9adbd452019-12-20 08:45:55 -07004531 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004532 req->sync.flags);
4533 if (ret < 0)
4534 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004535 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004536 return 0;
4537}
4538
YueHaibing469956e2020-03-04 15:53:52 +08004539#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004540static int io_setup_async_msg(struct io_kiocb *req,
4541 struct io_async_msghdr *kmsg)
4542{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004543 struct io_async_msghdr *async_msg = req->async_data;
4544
4545 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004546 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004547 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004548 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004549 return -ENOMEM;
4550 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004551 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004552 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004553 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004554 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004555	/* if we were using fast_iov, set it to the new one */
4556 if (!async_msg->free_iov)
4557 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4558
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004559 return -EAGAIN;
4560}
4561
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004562static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4563 struct io_async_msghdr *iomsg)
4564{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004565 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004566 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004567 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004568 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004569}
4570
Jens Axboe3529d8c2019-12-19 18:24:38 -07004571static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004572{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004573 struct io_async_msghdr *async_msg = req->async_data;
Jens Axboee47293f2019-12-20 08:58:21 -07004574 struct io_sr_msg *sr = &req->sr_msg;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004575 int ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004576
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004577 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4578 return -EINVAL;
4579
Jens Axboee47293f2019-12-20 08:58:21 -07004580 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004581 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004582 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004583
Jens Axboed8768362020-02-27 14:17:49 -07004584#ifdef CONFIG_COMPAT
4585 if (req->ctx->compat)
4586 sr->msg_flags |= MSG_CMSG_COMPAT;
4587#endif
4588
Jens Axboee8c2bc12020-08-15 18:44:09 -07004589 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
Jens Axboe3529d8c2019-12-19 18:24:38 -07004590 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004591 ret = io_sendmsg_copy_hdr(req, async_msg);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004592 if (!ret)
4593 req->flags |= REQ_F_NEED_CLEANUP;
4594 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004595}
4596
Pavel Begunkov889fca72021-02-10 00:03:09 +00004597static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004598{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004599 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004600 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004601 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004602 int ret;
4603
Florent Revestdba4a922020-12-04 12:36:04 +01004604 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004605 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004606 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004607
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004608 kmsg = req->async_data;
4609 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004610 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004611 if (ret)
4612 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004613 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004614 }
4615
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004616 flags = req->sr_msg.msg_flags;
4617 if (flags & MSG_DONTWAIT)
4618 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004619 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004620 flags |= MSG_DONTWAIT;
4621
4622 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004623 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004624 return io_setup_async_msg(req, kmsg);
4625 if (ret == -ERESTARTSYS)
4626 ret = -EINTR;
4627
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004628 /* fast path, check for non-NULL to avoid function call */
4629 if (kmsg->free_iov)
4630 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004631 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboefddafac2020-01-04 20:19:44 -07004632 if (ret < 0)
4633 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004634 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004635 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004636}
4637
Pavel Begunkov889fca72021-02-10 00:03:09 +00004638static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004639{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004640 struct io_sr_msg *sr = &req->sr_msg;
4641 struct msghdr msg;
4642 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004643 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004644 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004645 int ret;
4646
Florent Revestdba4a922020-12-04 12:36:04 +01004647 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004648 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004649 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004650
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004651 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4652 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004653 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004654
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004655 msg.msg_name = NULL;
4656 msg.msg_control = NULL;
4657 msg.msg_controllen = 0;
4658 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004659
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004660 flags = req->sr_msg.msg_flags;
4661 if (flags & MSG_DONTWAIT)
4662 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004663 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004664 flags |= MSG_DONTWAIT;
Jens Axboe03b12302019-12-02 18:50:25 -07004665
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004666 msg.msg_flags = flags;
4667 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004668 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004669 return -EAGAIN;
4670 if (ret == -ERESTARTSYS)
4671 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004672
Jens Axboe03b12302019-12-02 18:50:25 -07004673 if (ret < 0)
4674 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004675 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004676 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004677}
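
/*
 * Editorial example (userspace side, not part of this file): io_send() above
 * is the plain-buffer sibling of io_sendmsg(); setting MSG_DONTWAIT in
 * sqe->msg_flags marks the request REQ_F_NOWAIT, otherwise a nonblocking
 * issue that would block is retried from the async worker.  Sketch assuming
 * liburing's io_uring_prep_send() helper; the connected socket fd is
 * hypothetical, and the buffer must stay valid until completion.
 */
#include <liburing.h>

static int queue_send(struct io_uring *ring, int sockfd)
{
	static const char msg[] = "ping";
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_send(sqe, sockfd, msg, sizeof(msg) - 1, 0);
	return io_uring_submit(ring);
}
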
4678
Pavel Begunkov1400e692020-07-12 20:41:05 +03004679static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4680 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004681{
4682 struct io_sr_msg *sr = &req->sr_msg;
4683 struct iovec __user *uiov;
4684 size_t iov_len;
4685 int ret;
4686
Pavel Begunkov1400e692020-07-12 20:41:05 +03004687 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4688 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004689 if (ret)
4690 return ret;
4691
4692 if (req->flags & REQ_F_BUFFER_SELECT) {
4693 if (iov_len > 1)
4694 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004695 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004696 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004697 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004698 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004699 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004700 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004701 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004702 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004703 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004704 if (ret > 0)
4705 ret = 0;
4706 }
4707
4708 return ret;
4709}
4710
4711#ifdef CONFIG_COMPAT
4712static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004713 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004714{
4715 struct compat_msghdr __user *msg_compat;
4716 struct io_sr_msg *sr = &req->sr_msg;
4717 struct compat_iovec __user *uiov;
4718 compat_uptr_t ptr;
4719 compat_size_t len;
4720 int ret;
4721
Pavel Begunkov270a5942020-07-12 20:41:04 +03004722 msg_compat = (struct compat_msghdr __user *) sr->umsg;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004723 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004724 &ptr, &len);
4725 if (ret)
4726 return ret;
4727
4728 uiov = compat_ptr(ptr);
4729 if (req->flags & REQ_F_BUFFER_SELECT) {
4730 compat_ssize_t clen;
4731
4732 if (len > 1)
4733 return -EINVAL;
4734 if (!access_ok(uiov, sizeof(*uiov)))
4735 return -EFAULT;
4736 if (__get_user(clen, &uiov->iov_len))
4737 return -EFAULT;
4738 if (clen < 0)
4739 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004740 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004741 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004742 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004743 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004744 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004745 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004746 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004747 if (ret < 0)
4748 return ret;
4749 }
4750
4751 return 0;
4752}
Jens Axboe03b12302019-12-02 18:50:25 -07004753#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004754
Pavel Begunkov1400e692020-07-12 20:41:05 +03004755static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4756 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004757{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004758 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004759
4760#ifdef CONFIG_COMPAT
4761 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004762 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004763#endif
4764
Pavel Begunkov1400e692020-07-12 20:41:05 +03004765 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004766}
4767
Jens Axboebcda7ba2020-02-23 16:42:51 -07004768static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004769 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004770{
4771 struct io_sr_msg *sr = &req->sr_msg;
4772 struct io_buffer *kbuf;
4773
Jens Axboebcda7ba2020-02-23 16:42:51 -07004774 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4775 if (IS_ERR(kbuf))
4776 return kbuf;
4777
4778 sr->kbuf = kbuf;
4779 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004780 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004781}
4782
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004783static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4784{
4785 return io_put_kbuf(req, req->sr_msg.kbuf);
4786}
4787
Jens Axboe3529d8c2019-12-19 18:24:38 -07004788static int io_recvmsg_prep(struct io_kiocb *req,
4789 const struct io_uring_sqe *sqe)
Jens Axboe03b12302019-12-02 18:50:25 -07004790{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004791 struct io_async_msghdr *async_msg = req->async_data;
Jens Axboee47293f2019-12-20 08:58:21 -07004792 struct io_sr_msg *sr = &req->sr_msg;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004793 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004794
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004795 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4796 return -EINVAL;
4797
Jens Axboe3529d8c2019-12-19 18:24:38 -07004798 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004799 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004800 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004801 sr->bgid = READ_ONCE(sqe->buf_group);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004802
Jens Axboed8768362020-02-27 14:17:49 -07004803#ifdef CONFIG_COMPAT
4804 if (req->ctx->compat)
4805 sr->msg_flags |= MSG_CMSG_COMPAT;
4806#endif
4807
Jens Axboee8c2bc12020-08-15 18:44:09 -07004808 if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
Jens Axboe06b76d42019-12-19 14:44:26 -07004809 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004810 ret = io_recvmsg_copy_hdr(req, async_msg);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004811 if (!ret)
4812 req->flags |= REQ_F_NEED_CLEANUP;
4813 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004814}
4815
Pavel Begunkov889fca72021-02-10 00:03:09 +00004816static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004817{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004818 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004819 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004820 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004821 unsigned flags;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004822 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004823 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004824
Florent Revestdba4a922020-12-04 12:36:04 +01004825 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004826 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004827 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004828
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004829 kmsg = req->async_data;
4830 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004831 ret = io_recvmsg_copy_hdr(req, &iomsg);
4832 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004833 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004834 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004835 }
4836
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004837 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004838 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004839 if (IS_ERR(kbuf))
4840 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004841 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004842 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4843 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004844 1, req->sr_msg.len);
4845 }
4846
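	/*
	 * MSG_DONTWAIT from userspace means the request should not be
	 * retried from async context; otherwise only this nonblocking issue
	 * attempt gets MSG_DONTWAIT.
	 */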
4847 flags = req->sr_msg.msg_flags;
4848 if (flags & MSG_DONTWAIT)
4849 req->flags |= REQ_F_NOWAIT;
4850 else if (force_nonblock)
4851 flags |= MSG_DONTWAIT;
4852
4853 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4854 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004855 if (force_nonblock && ret == -EAGAIN)
4856 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004857 if (ret == -ERESTARTSYS)
4858 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004859
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004860 if (req->flags & REQ_F_BUFFER_SELECTED)
4861 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004862 /* fast path, check for non-NULL to avoid function call */
4863 if (kmsg->free_iov)
4864 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004865 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004866 if (ret < 0)
4867 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004868 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004869 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004870}
4871
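/*
 * IORING_OP_RECV: the msghdr-less variant. There is only a single buffer
 * and length, so the msghdr is built on the stack and no async iovec copy
 * is needed.
 */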
Pavel Begunkov889fca72021-02-10 00:03:09 +00004872static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004873{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004874 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004875 struct io_sr_msg *sr = &req->sr_msg;
4876 struct msghdr msg;
4877 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004878 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004879 struct iovec iov;
4880 unsigned flags;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004881 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004882 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004883
Florent Revestdba4a922020-12-04 12:36:04 +01004884 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004885 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004886 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004887
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004888 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004889 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004890 if (IS_ERR(kbuf))
4891 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004892 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004893 }
4894
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004895 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004896 if (unlikely(ret))
4897 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004898
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004899 msg.msg_name = NULL;
4900 msg.msg_control = NULL;
4901 msg.msg_controllen = 0;
4902 msg.msg_namelen = 0;
4903 msg.msg_iocb = NULL;
4904 msg.msg_flags = 0;
4905
4906 flags = req->sr_msg.msg_flags;
4907 if (flags & MSG_DONTWAIT)
4908 req->flags |= REQ_F_NOWAIT;
4909 else if (force_nonblock)
4910 flags |= MSG_DONTWAIT;
4911
4912 ret = sock_recvmsg(sock, &msg, flags);
4913 if (force_nonblock && ret == -EAGAIN)
4914 return -EAGAIN;
4915 if (ret == -ERESTARTSYS)
4916 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004917out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004918 if (req->flags & REQ_F_BUFFER_SELECTED)
4919 cflags = io_put_recv_kbuf(req);
Jens Axboefddafac2020-01-04 20:19:44 -07004920 if (ret < 0)
4921 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004922 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004923 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004924}
4925
Jens Axboe3529d8c2019-12-19 18:24:38 -07004926static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004927{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004928 struct io_accept *accept = &req->accept;
4929
Jens Axboe14587a462020-09-05 11:36:08 -06004930 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004931 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004932 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004933 return -EINVAL;
4934
Jens Axboed55e5f52019-12-11 16:12:15 -07004935 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4936 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004937 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004938 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004939 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004940}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004941
Pavel Begunkov889fca72021-02-10 00:03:09 +00004942static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004943{
4944 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004945 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004946 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004947 int ret;
4948
Jiufei Xuee697dee2020-06-10 13:41:59 +08004949 if (req->file->f_flags & O_NONBLOCK)
4950 req->flags |= REQ_F_NOWAIT;
4951
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004952 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004953 accept->addr_len, accept->flags,
4954 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004955 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004956 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004957 if (ret < 0) {
4958 if (ret == -ERESTARTSYS)
4959 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004960 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004961 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004962 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004963 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004964}
4965
Jens Axboe3529d8c2019-12-19 18:24:38 -07004966static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004967{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004968 struct io_connect *conn = &req->connect;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004969 struct io_async_connect *io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004970
Jens Axboe14587a462020-09-05 11:36:08 -06004971 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004972 return -EINVAL;
4973 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4974 return -EINVAL;
4975
Jens Axboe3529d8c2019-12-19 18:24:38 -07004976 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4977 conn->addr_len = READ_ONCE(sqe->addr2);
4978
4979 if (!io)
4980 return 0;
4981
4982 return move_addr_to_kernel(conn->addr, conn->addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004983 &io->address);
Jens Axboef499a022019-12-02 16:28:46 -07004984}
4985
Pavel Begunkov889fca72021-02-10 00:03:09 +00004986static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004987{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004988 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004989 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004990 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004991 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004992
Jens Axboee8c2bc12020-08-15 18:44:09 -07004993 if (req->async_data) {
4994 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004995 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004996 ret = move_addr_to_kernel(req->connect.addr,
4997 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004998 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004999 if (ret)
5000 goto out;
5001 io = &__io;
5002 }
5003
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005004 file_flags = force_nonblock ? O_NONBLOCK : 0;
5005
Jens Axboee8c2bc12020-08-15 18:44:09 -07005006 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005007 req->connect.addr_len, file_flags);
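	/*
	 * On -EAGAIN/-EINPROGRESS from a nonblocking attempt, stash the
	 * kernel copy of the sockaddr in async_data so the retry doesn't
	 * have to touch user memory again.
	 */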
Jens Axboe87f80d62019-12-03 11:23:54 -07005008 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07005009 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07005010 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005011 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07005012 ret = -ENOMEM;
5013 goto out;
5014 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07005015 io = req->async_data;
5016 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07005017 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07005018 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07005019 if (ret == -ERESTARTSYS)
5020 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07005021out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005022 if (ret < 0)
5023 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005024 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005025 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005026}
YueHaibing469956e2020-03-04 15:53:52 +08005027#else /* !CONFIG_NET */
5028static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5029{
Jens Axboef8e85cf2019-11-23 14:24:24 -07005030 return -EOPNOTSUPP;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005031}
5032
Pavel Begunkov889fca72021-02-10 00:03:09 +00005033static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005034{
YueHaibing469956e2020-03-04 15:53:52 +08005035 return -EOPNOTSUPP;
5036}
5037
Pavel Begunkov889fca72021-02-10 00:03:09 +00005038static int io_send(struct io_kiocb *req, unsigned int issue_flags)
YueHaibing469956e2020-03-04 15:53:52 +08005039{
5040 return -EOPNOTSUPP;
5041}
5042
5043static int io_recvmsg_prep(struct io_kiocb *req,
5044 const struct io_uring_sqe *sqe)
5045{
5046 return -EOPNOTSUPP;
5047}
5048
Pavel Begunkov889fca72021-02-10 00:03:09 +00005049static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
YueHaibing469956e2020-03-04 15:53:52 +08005050{
5051 return -EOPNOTSUPP;
5052}
5053
Pavel Begunkov889fca72021-02-10 00:03:09 +00005054static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
YueHaibing469956e2020-03-04 15:53:52 +08005055{
5056 return -EOPNOTSUPP;
5057}
5058
5059static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5060{
5061 return -EOPNOTSUPP;
5062}
5063
Pavel Begunkov889fca72021-02-10 00:03:09 +00005064static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
YueHaibing469956e2020-03-04 15:53:52 +08005065{
5066 return -EOPNOTSUPP;
5067}
5068
5069static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5070{
5071 return -EOPNOTSUPP;
5072}
5073
Pavel Begunkov889fca72021-02-10 00:03:09 +00005074static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
YueHaibing469956e2020-03-04 15:53:52 +08005075{
5076 return -EOPNOTSUPP;
5077}
5078#endif /* CONFIG_NET */
Jens Axboe2b188cc2019-01-07 10:46:33 -07005079
Jens Axboed7718a92020-02-14 22:23:12 -07005080struct io_poll_table {
5081 struct poll_table_struct pt;
5082 struct io_kiocb *req;
5083 int error;
5084};
5085
Jens Axboed7718a92020-02-14 22:23:12 -07005086static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
5087 __poll_t mask, task_work_func_t func)
5088{
Jens Axboeaa96bf82020-04-03 11:26:26 -06005089 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07005090
5091 /* for instances that support it check for an event match first: */
5092 if (mask && !(mask & poll->events))
5093 return 0;
5094
5095 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5096
5097 list_del_init(&poll->wait.entry);
5098
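	/*
	 * Record the poll result and punt the rest of the completion to task
	 * context; the extra ctx reference keeps the ring alive until the
	 * task_work callback has run.
	 */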
Jens Axboed7718a92020-02-14 22:23:12 -07005099 req->result = mask;
5100 init_task_work(&req->task_work, func);
Jens Axboe6d816e02020-08-11 08:04:14 -06005101 percpu_ref_get(&req->ctx->refs);
5102
Jens Axboed7718a92020-02-14 22:23:12 -07005103 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06005104 * If this fails, then the task is exiting. When a task exits, the
5105 * work gets canceled, so just cancel this request as well instead
 5106	 * of executing it. We can't safely execute it anyway, as we may not
 5107	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07005108 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06005109 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06005110 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06005111 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00005112 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06005113 }
Jens Axboed7718a92020-02-14 22:23:12 -07005114 return 1;
5115}
5116
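/*
 * Called from task_work: if no result has been recorded, poll the file
 * again; if it is still not ready, re-add our wait queue entry and return
 * true so the caller backs off. The completion_lock is held on return.
 */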
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005117static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
5118 __acquires(&req->ctx->completion_lock)
5119{
5120 struct io_ring_ctx *ctx = req->ctx;
5121
5122 if (!req->result && !READ_ONCE(poll->canceled)) {
5123 struct poll_table_struct pt = { ._key = poll->events };
5124
5125 req->result = vfs_poll(req->file, &pt) & poll->events;
5126 }
5127
5128 spin_lock_irq(&ctx->completion_lock);
5129 if (!req->result && !READ_ONCE(poll->canceled)) {
5130 add_wait_queue(poll->head, &poll->wait);
5131 return true;
5132 }
5133
5134 return false;
5135}
5136
Jens Axboed4e7cd32020-08-15 11:44:50 -07005137static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06005138{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005139 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07005140 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07005141 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07005142 return req->apoll->double_poll;
5143}
5144
5145static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5146{
5147 if (req->opcode == IORING_OP_POLL_ADD)
5148 return &req->poll;
5149 return &req->apoll->poll;
5150}
5151
5152static void io_poll_remove_double(struct io_kiocb *req)
5153{
5154 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005155
5156 lockdep_assert_held(&req->ctx->completion_lock);
5157
5158 if (poll && poll->head) {
5159 struct wait_queue_head *head = poll->head;
5160
5161 spin_lock(&head->lock);
5162 list_del_init(&poll->wait.entry);
5163 if (poll->wait.private)
5164 refcount_dec(&req->refs);
5165 poll->head = NULL;
5166 spin_unlock(&head->lock);
5167 }
5168}
5169
5170static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
5171{
5172 struct io_ring_ctx *ctx = req->ctx;
5173
Jens Axboed4e7cd32020-08-15 11:44:50 -07005174 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005175 req->poll.done = true;
5176 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
5177 io_commit_cqring(ctx);
5178}
5179
Jens Axboe18bceab2020-05-15 11:56:54 -06005180static void io_poll_task_func(struct callback_head *cb)
5181{
5182 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06005183 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005184 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06005185
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005186 if (io_poll_rewait(req, &req->poll)) {
5187 spin_unlock_irq(&ctx->completion_lock);
5188 } else {
5189 hash_del(&req->hash_node);
5190 io_poll_complete(req, req->result, 0);
5191 spin_unlock_irq(&ctx->completion_lock);
5192
5193 nxt = io_put_req_find_next(req);
5194 io_cqring_ev_posted(ctx);
5195 if (nxt)
5196 __io_req_task_submit(nxt);
5197 }
5198
Jens Axboe6d816e02020-08-11 08:04:14 -06005199 percpu_ref_put(&ctx->refs);
Jens Axboe18bceab2020-05-15 11:56:54 -06005200}
5201
5202static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5203 int sync, void *key)
5204{
5205 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07005206 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005207 __poll_t mask = key_to_poll(key);
5208
5209 /* for instances that support it check for an event match first: */
5210 if (mask && !(mask & poll->events))
5211 return 0;
5212
Jens Axboe8706e042020-09-28 08:38:54 -06005213 list_del_init(&wait->entry);
5214
Jens Axboe807abcb2020-07-17 17:09:27 -06005215 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005216 bool done;
5217
Jens Axboe807abcb2020-07-17 17:09:27 -06005218 spin_lock(&poll->head->lock);
5219 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06005220 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06005221 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005222 /* make sure double remove sees this as being gone */
5223 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06005224 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06005225 if (!done) {
 5226			/* use the wait func handler, so it matches the request type */
5227 poll->wait.func(&poll->wait, mode, sync, key);
5228 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005229 }
5230 refcount_dec(&req->refs);
5231 return 1;
5232}
5233
5234static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5235 wait_queue_func_t wake_func)
5236{
5237 poll->head = NULL;
5238 poll->done = false;
5239 poll->canceled = false;
5240 poll->events = events;
5241 INIT_LIST_HEAD(&poll->wait.entry);
5242 init_waitqueue_func_entry(&poll->wait, wake_func);
5243}
5244
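/*
 * Queue proc invoked via vfs_poll(): add the wait entry to the head the
 * file hands us. Shared by IORING_OP_POLL_ADD and internal async poll,
 * which differ only in where the second ("double") poll entry is stored.
 */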
5245static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005246 struct wait_queue_head *head,
5247 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005248{
5249 struct io_kiocb *req = pt->req;
5250
5251 /*
5252 * If poll->head is already set, it's because the file being polled
 5253	 * uses multiple waitqueues for poll handling (e.g. one for read, one
 5254	 * for write). Set up a separate io_poll_iocb if this happens.
5255 */
5256 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005257 struct io_poll_iocb *poll_one = poll;
5258
Jens Axboe18bceab2020-05-15 11:56:54 -06005259 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005260 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005261 pt->error = -EINVAL;
5262 return;
5263 }
5264 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5265 if (!poll) {
5266 pt->error = -ENOMEM;
5267 return;
5268 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005269 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboe18bceab2020-05-15 11:56:54 -06005270 refcount_inc(&req->refs);
5271 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005272 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005273 }
5274
5275 pt->error = 0;
5276 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005277
5278 if (poll->events & EPOLLEXCLUSIVE)
5279 add_wait_queue_exclusive(head, &poll->wait);
5280 else
5281 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005282}
5283
5284static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5285 struct poll_table_struct *p)
5286{
5287 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005288 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005289
Jens Axboe807abcb2020-07-17 17:09:27 -06005290 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005291}
5292
Jens Axboed7718a92020-02-14 22:23:12 -07005293static void io_async_task_func(struct callback_head *cb)
5294{
5295 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5296 struct async_poll *apoll = req->apoll;
5297 struct io_ring_ctx *ctx = req->ctx;
5298
5299 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5300
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005301 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005302 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe6d816e02020-08-11 08:04:14 -06005303 percpu_ref_put(&ctx->refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005304 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005305 }
5306
Jens Axboe31067252020-05-17 17:43:31 -06005307 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005308 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005309 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005310
Jens Axboed4e7cd32020-08-15 11:44:50 -07005311 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005312 spin_unlock_irq(&ctx->completion_lock);
5313
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005314 if (!READ_ONCE(apoll->poll.canceled))
5315 __io_req_task_submit(req);
5316 else
5317 __io_req_task_cancel(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005318
Jens Axboe6d816e02020-08-11 08:04:14 -06005319 percpu_ref_put(&ctx->refs);
Jens Axboe807abcb2020-07-17 17:09:27 -06005320 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005321 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005322}
5323
5324static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5325 void *key)
5326{
5327 struct io_kiocb *req = wait->private;
5328 struct io_poll_iocb *poll = &req->apoll->poll;
5329
5330 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5331 key_to_poll(key));
5332
5333 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5334}
5335
5336static void io_poll_req_insert(struct io_kiocb *req)
5337{
5338 struct io_ring_ctx *ctx = req->ctx;
5339 struct hlist_head *list;
5340
5341 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5342 hlist_add_head(&req->hash_node, list);
5343}
5344
5345static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5346 struct io_poll_iocb *poll,
5347 struct io_poll_table *ipt, __poll_t mask,
5348 wait_queue_func_t wake_func)
5349 __acquires(&ctx->completion_lock)
5350{
5351 struct io_ring_ctx *ctx = req->ctx;
5352 bool cancel = false;
5353
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005354 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005355 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005356 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005357 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005358
5359 ipt->pt._key = mask;
5360 ipt->req = req;
5361 ipt->error = -EINVAL;
5362
Jens Axboed7718a92020-02-14 22:23:12 -07005363 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5364
5365 spin_lock_irq(&ctx->completion_lock);
5366 if (likely(poll->head)) {
5367 spin_lock(&poll->head->lock);
5368 if (unlikely(list_empty(&poll->wait.entry))) {
5369 if (ipt->error)
5370 cancel = true;
5371 ipt->error = 0;
5372 mask = 0;
5373 }
5374 if (mask || ipt->error)
5375 list_del_init(&poll->wait.entry);
5376 else if (cancel)
5377 WRITE_ONCE(poll->canceled, true);
5378 else if (!poll->done) /* actually waiting for an event */
5379 io_poll_req_insert(req);
5380 spin_unlock(&poll->head->lock);
5381 }
5382
5383 return mask;
5384}
5385
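/*
 * Arm poll-driven retry for a request that would otherwise block: instead
 * of punting it to io-wq, wait for the file to signal readiness and then
 * re-issue the request from task context. Returns false if this request
 * can't use that path.
 */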
5386static bool io_arm_poll_handler(struct io_kiocb *req)
5387{
5388 const struct io_op_def *def = &io_op_defs[req->opcode];
5389 struct io_ring_ctx *ctx = req->ctx;
5390 struct async_poll *apoll;
5391 struct io_poll_table ipt;
5392 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005393 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005394
5395 if (!req->file || !file_can_poll(req->file))
5396 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005397 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005398 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005399 if (def->pollin)
5400 rw = READ;
5401 else if (def->pollout)
5402 rw = WRITE;
5403 else
5404 return false;
 5405	/* if we can't do a nonblocking try, there's no point in arming a poll handler */
5406 if (!io_file_supports_async(req->file, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005407 return false;
5408
5409 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5410 if (unlikely(!apoll))
5411 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005412 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005413
5414 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005415 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005416
Nathan Chancellor8755d972020-03-02 16:01:19 -07005417 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005418 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005419 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005420 if (def->pollout)
5421 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005422
5423 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5424 if ((req->opcode == IORING_OP_RECVMSG) &&
5425 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5426 mask &= ~POLLIN;
5427
Jens Axboed7718a92020-02-14 22:23:12 -07005428 mask |= POLLERR | POLLPRI;
5429
5430 ipt.pt._qproc = io_async_queue_proc;
5431
5432 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5433 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005434 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005435 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005436 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005437 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005438 kfree(apoll);
5439 return false;
5440 }
5441 spin_unlock_irq(&ctx->completion_lock);
5442 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5443 apoll->poll.events);
5444 return true;
5445}
5446
5447static bool __io_poll_remove_one(struct io_kiocb *req,
5448 struct io_poll_iocb *poll)
5449{
Jens Axboeb41e9852020-02-17 09:52:41 -07005450 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005451
5452 spin_lock(&poll->head->lock);
5453 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005454 if (!list_empty(&poll->wait.entry)) {
5455 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005456 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005457 }
5458 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005459 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005460 return do_complete;
5461}
5462
5463static bool io_poll_remove_one(struct io_kiocb *req)
5464{
5465 bool do_complete;
5466
Jens Axboed4e7cd32020-08-15 11:44:50 -07005467 io_poll_remove_double(req);
5468
Jens Axboed7718a92020-02-14 22:23:12 -07005469 if (req->opcode == IORING_OP_POLL_ADD) {
5470 do_complete = __io_poll_remove_one(req, &req->poll);
5471 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005472 struct async_poll *apoll = req->apoll;
5473
Jens Axboed7718a92020-02-14 22:23:12 -07005474 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005475 do_complete = __io_poll_remove_one(req, &apoll->poll);
5476 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005477 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005478 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005479 kfree(apoll);
5480 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005481 }
5482
Jens Axboeb41e9852020-02-17 09:52:41 -07005483 if (do_complete) {
5484 io_cqring_fill_event(req, -ECANCELED);
5485 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005486 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005487 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005488 }
5489
5490 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005491}
5492
Jens Axboe76e1b642020-09-26 15:05:03 -06005493/*
5494 * Returns true if we found and killed one or more poll requests
5495 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005496static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5497 struct files_struct *files)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005498{
Jens Axboe78076bb2019-12-04 19:56:40 -07005499 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005500 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005501 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005502
5503 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005504 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5505 struct hlist_head *list;
5506
5507 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005508 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00005509 if (io_match_task(req, tsk, files))
Jens Axboef3606e32020-09-22 08:18:24 -06005510 posted += io_poll_remove_one(req);
5511 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005512 }
5513 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005514
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005515 if (posted)
5516 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005517
5518 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005519}
5520
Jens Axboe47f46762019-11-09 17:43:02 -07005521static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5522{
Jens Axboe78076bb2019-12-04 19:56:40 -07005523 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005524 struct io_kiocb *req;
5525
Jens Axboe78076bb2019-12-04 19:56:40 -07005526 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5527 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005528 if (sqe_addr != req->user_data)
5529 continue;
5530 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005531 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005532 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005533 }
5534
5535 return -ENOENT;
5536}
5537
Jens Axboe3529d8c2019-12-19 18:24:38 -07005538static int io_poll_remove_prep(struct io_kiocb *req,
5539 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005540{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005541 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5542 return -EINVAL;
5543 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5544 sqe->poll_events)
5545 return -EINVAL;
5546
Pavel Begunkov018043b2020-10-27 23:17:18 +00005547 req->poll_remove.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005548 return 0;
5549}
5550
5551/*
5552 * Find a running poll command that matches one specified in sqe->addr,
5553 * and remove it if found.
5554 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005555static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005556{
5557 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0969e782019-12-17 18:40:57 -07005558 int ret;
5559
Jens Axboe221c5eb2019-01-17 09:41:58 -07005560 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov018043b2020-10-27 23:17:18 +00005561 ret = io_poll_cancel(ctx, req->poll_remove.addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005562 spin_unlock_irq(&ctx->completion_lock);
5563
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005564 if (ret < 0)
5565 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005566 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005567 return 0;
5568}
5569
Jens Axboe221c5eb2019-01-17 09:41:58 -07005570static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5571 void *key)
5572{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005573 struct io_kiocb *req = wait->private;
5574 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005575
Jens Axboed7718a92020-02-14 22:23:12 -07005576 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005577}
5578
Jens Axboe221c5eb2019-01-17 09:41:58 -07005579static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5580 struct poll_table_struct *p)
5581{
5582 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5583
Jens Axboee8c2bc12020-08-15 18:44:09 -07005584 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005585}
5586
Jens Axboe3529d8c2019-12-19 18:24:38 -07005587static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005588{
5589 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005590 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005591
5592 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5593 return -EINVAL;
5594 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5595 return -EINVAL;
5596
Jiufei Xue5769a352020-06-17 17:53:55 +08005597 events = READ_ONCE(sqe->poll32_events);
5598#ifdef __BIG_ENDIAN
5599 events = swahw32(events);
5600#endif
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005601 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5602 (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005603 return 0;
5604}
5605
Pavel Begunkov61e98202021-02-10 00:03:08 +00005606static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005607{
5608 struct io_poll_iocb *poll = &req->poll;
5609 struct io_ring_ctx *ctx = req->ctx;
5610 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005611 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005612
Jens Axboed7718a92020-02-14 22:23:12 -07005613 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005614
Jens Axboed7718a92020-02-14 22:23:12 -07005615 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5616 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005617
Jens Axboe8c838782019-03-12 15:48:16 -06005618 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005619 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005620 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005621 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005622 spin_unlock_irq(&ctx->completion_lock);
5623
Jens Axboe8c838782019-03-12 15:48:16 -06005624 if (mask) {
5625 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005626 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005627 }
Jens Axboe8c838782019-03-12 15:48:16 -06005628 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005629}
5630
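/*
 * hrtimer callback: the timeout has fired, so post -ETIME for the request
 * under completion_lock and drop its reference.
 */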
Jens Axboe5262f562019-09-17 12:26:57 -06005631static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5632{
Jens Axboead8a48a2019-11-15 08:49:11 -07005633 struct io_timeout_data *data = container_of(timer,
5634 struct io_timeout_data, timer);
5635 struct io_kiocb *req = data->req;
5636 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005637 unsigned long flags;
5638
Jens Axboe5262f562019-09-17 12:26:57 -06005639 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005640 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005641 atomic_set(&req->ctx->cq_timeouts,
5642 atomic_read(&req->ctx->cq_timeouts) + 1);
5643
Jens Axboe78e19bb2019-11-06 15:21:34 -07005644 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005645 io_commit_cqring(ctx);
5646 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5647
5648 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005649 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005650 io_put_req(req);
5651 return HRTIMER_NORESTART;
5652}
5653
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005654static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5655 __u64 user_data)
Jens Axboe47f46762019-11-09 17:43:02 -07005656{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005657 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005658 struct io_kiocb *req;
5659 int ret = -ENOENT;
5660
5661 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5662 if (user_data == req->user_data) {
5663 ret = 0;
5664 break;
5665 }
5666 }
5667
5668 if (ret == -ENOENT)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005669 return ERR_PTR(ret);
Jens Axboef254ac02020-08-12 17:33:30 -06005670
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005671 io = req->async_data;
5672 ret = hrtimer_try_to_cancel(&io->timer);
5673 if (ret == -1)
5674 return ERR_PTR(-EALREADY);
5675 list_del_init(&req->timeout.list);
5676 return req;
5677}
5678
5679static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5680{
5681 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5682
5683 if (IS_ERR(req))
5684 return PTR_ERR(req);
5685
5686 req_set_fail_links(req);
5687 io_cqring_fill_event(req, -ECANCELED);
5688 io_put_req_deferred(req, 1);
5689 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005690}
5691
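/*
 * IORING_TIMEOUT_UPDATE: pull the request off the timeout list with its
 * timer stopped, then re-queue it as a pure (no-sequence) timeout with
 * the new expiry.
 */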
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005692static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5693 struct timespec64 *ts, enum hrtimer_mode mode)
5694{
5695 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5696 struct io_timeout_data *data;
5697
5698 if (IS_ERR(req))
5699 return PTR_ERR(req);
5700
5701 req->timeout.off = 0; /* noseq */
5702 data = req->async_data;
5703 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5704 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5705 data->timer.function = io_timeout_fn;
5706 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5707 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005708}
5709
Jens Axboe3529d8c2019-12-19 18:24:38 -07005710static int io_timeout_remove_prep(struct io_kiocb *req,
5711 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005712{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005713 struct io_timeout_rem *tr = &req->timeout_rem;
5714
Jens Axboeb29472e2019-12-17 18:50:29 -07005715 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5716 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005717 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5718 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005719 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005720 return -EINVAL;
5721
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005722 tr->addr = READ_ONCE(sqe->addr);
5723 tr->flags = READ_ONCE(sqe->timeout_flags);
5724 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5725 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5726 return -EINVAL;
5727 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5728 return -EFAULT;
5729 } else if (tr->flags) {
5730 /* timeout removal doesn't support flags */
5731 return -EINVAL;
5732 }
5733
Jens Axboeb29472e2019-12-17 18:50:29 -07005734 return 0;
5735}
5736
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005737static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5738{
5739 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5740 : HRTIMER_MODE_REL;
5741}
5742
Jens Axboe11365042019-10-16 09:08:32 -06005743/*
5744 * Remove or update an existing timeout command
5745 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005746static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005747{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005748 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005749 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005750 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005751
Jens Axboe11365042019-10-16 09:08:32 -06005752 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005753 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005754 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005755 else
5756 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5757 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005758
Jens Axboe47f46762019-11-09 17:43:02 -07005759 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005760 io_commit_cqring(ctx);
5761 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005762 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005763 if (ret < 0)
5764 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005765 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005766 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005767}
5768
Jens Axboe3529d8c2019-12-19 18:24:38 -07005769static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005770 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005771{
Jens Axboead8a48a2019-11-15 08:49:11 -07005772 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005773 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005774 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005775
Jens Axboead8a48a2019-11-15 08:49:11 -07005776 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005777 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005778 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005779 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005780 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005781 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005782 flags = READ_ONCE(sqe->timeout_flags);
5783 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005784 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005785
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005786 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005787
Jens Axboee8c2bc12020-08-15 18:44:09 -07005788 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005789 return -ENOMEM;
5790
Jens Axboee8c2bc12020-08-15 18:44:09 -07005791 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005792 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005793
5794 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005795 return -EFAULT;
5796
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005797 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005798 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5799 return 0;
5800}
5801
Pavel Begunkov61e98202021-02-10 00:03:08 +00005802static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005803{
Jens Axboead8a48a2019-11-15 08:49:11 -07005804 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005805 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005806 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005807 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005808
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005809 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005810
Jens Axboe5262f562019-09-17 12:26:57 -06005811 /*
 5812	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005813 * timeout event to be satisfied. If it isn't set, then this is
5814 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005815 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005816 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005817 entry = ctx->timeout_list.prev;
5818 goto add;
5819 }
Jens Axboe5262f562019-09-17 12:26:57 -06005820
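	/*
	 * A sequenced timeout fires after 'off' more requests complete;
	 * record the CQ tail it was armed against so flushing can compare
	 * sequence numbers.
	 */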
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005821 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5822 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005823
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005824 /* Update the last seq here in case io_flush_timeouts() hasn't.
5825 * This is safe because ->completion_lock is held, and submissions
5826 * and completions are never mixed in the same ->completion_lock section.
5827 */
5828 ctx->cq_last_tm_flush = tail;
5829
Jens Axboe5262f562019-09-17 12:26:57 -06005830 /*
5831 * Insertion sort, ensuring the first entry in the list is always
5832 * the one we need first.
5833 */
Jens Axboe5262f562019-09-17 12:26:57 -06005834 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005835 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5836 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005837
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005838 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005839 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005840 /* nxt.seq is behind @tail, otherwise would've been completed */
5841 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005842 break;
5843 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005844add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005845 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005846 data->timer.function = io_timeout_fn;
5847 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005848 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005849 return 0;
5850}
5851
Jens Axboe62755e32019-10-28 21:49:21 -06005852static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005853{
Jens Axboe62755e32019-10-28 21:49:21 -06005854 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboede0617e2019-04-06 21:51:27 -06005855
Jens Axboe62755e32019-10-28 21:49:21 -06005856 return req->user_data == (unsigned long) data;
5857}
5858
Jens Axboee977d6d2019-11-05 12:39:45 -07005859static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
Jens Axboe62755e32019-10-28 21:49:21 -06005860{
Jens Axboe62755e32019-10-28 21:49:21 -06005861 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005862 int ret = 0;
5863
Pavel Begunkov4f26bda2020-06-15 10:24:03 +03005864 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005865 switch (cancel_ret) {
5866 case IO_WQ_CANCEL_OK:
5867 ret = 0;
5868 break;
5869 case IO_WQ_CANCEL_RUNNING:
5870 ret = -EALREADY;
5871 break;
5872 case IO_WQ_CANCEL_NOTFOUND:
5873 ret = -ENOENT;
5874 break;
5875 }
5876
Jens Axboee977d6d2019-11-05 12:39:45 -07005877 return ret;
5878}
5879
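/*
 * Async cancel: try io-wq first; if the request isn't queued there it may
 * be an armed timeout or poll request, so fall back to those lists under
 * completion_lock.
 */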
Jens Axboe47f46762019-11-09 17:43:02 -07005880static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5881 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005882 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005883{
5884 unsigned long flags;
5885 int ret;
5886
5887 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5888 if (ret != -ENOENT) {
5889 spin_lock_irqsave(&ctx->completion_lock, flags);
5890 goto done;
5891 }
5892
5893 spin_lock_irqsave(&ctx->completion_lock, flags);
5894 ret = io_timeout_cancel(ctx, sqe_addr);
5895 if (ret != -ENOENT)
5896 goto done;
5897 ret = io_poll_cancel(ctx, sqe_addr);
5898done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005899 if (!ret)
5900 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005901 io_cqring_fill_event(req, ret);
5902 io_commit_cqring(ctx);
5903 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5904 io_cqring_ev_posted(ctx);
5905
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005906 if (ret < 0)
5907 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005908 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005909}
5910
Jens Axboe3529d8c2019-12-19 18:24:38 -07005911static int io_async_cancel_prep(struct io_kiocb *req,
5912 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005913{
Jens Axboefbf23842019-12-17 18:45:56 -07005914 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005915 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005916 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5917 return -EINVAL;
5918 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005919 return -EINVAL;
5920
Jens Axboefbf23842019-12-17 18:45:56 -07005921 req->cancel.addr = READ_ONCE(sqe->addr);
5922 return 0;
5923}
5924
Pavel Begunkov61e98202021-02-10 00:03:08 +00005925static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005926{
5927 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefbf23842019-12-17 18:45:56 -07005928
Pavel Begunkov014db002020-03-03 21:33:12 +03005929 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06005930 return 0;
5931}
5932
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005933static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005934 const struct io_uring_sqe *sqe)
5935{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005936 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5937 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005938 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5939 return -EINVAL;
5940 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005941 return -EINVAL;
5942
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005943 req->rsrc_update.offset = READ_ONCE(sqe->off);
5944 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5945 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005946 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005947 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005948 return 0;
5949}
5950
Pavel Begunkov889fca72021-02-10 00:03:09 +00005951static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005952{
5953 struct io_ring_ctx *ctx = req->ctx;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005954 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005955 int ret;
5956
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005957 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005958 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005959
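	/*
	 * The file table update takes uring_lock and may sleep, which is why
	 * a nonblocking issue attempt bails with -EAGAIN above.
	 */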
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005960 up.offset = req->rsrc_update.offset;
5961 up.data = req->rsrc_update.arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005962
5963 mutex_lock(&ctx->uring_lock);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005964 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005965 mutex_unlock(&ctx->uring_lock);
5966
5967 if (ret < 0)
5968 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005969 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005970 return 0;
5971}
5972
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005973static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005974{
Jens Axboed625c6e2019-12-17 19:53:05 -07005975 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005976 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005977 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005978 case IORING_OP_READV:
5979 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005980 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005981 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005982 case IORING_OP_WRITEV:
5983 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005984 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005985 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005986 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005987 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005988 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005989 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005990 case IORING_OP_FSYNC:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005991 return io_prep_fsync(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005992 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005993 return io_prep_sfr(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005994 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005995 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005996 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005997 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005998 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005999 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07006000 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006001 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006002 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006003 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07006004 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006005 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07006006 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006007 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006008 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006009 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006010 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006011 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07006012 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006013 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006014 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006015 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07006016 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006017 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006018 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006019 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006020 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006021 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07006022 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006023 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07006024 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006025 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07006026 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006027 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006028 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006029 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006030 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006031 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006032 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006033 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07006034 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006035 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006036 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006037 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006038 case IORING_OP_SHUTDOWN:
6039 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06006040 case IORING_OP_RENAMEAT:
6041 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06006042 case IORING_OP_UNLINKAT:
6043 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006044 }
6045
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006046 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6047 req->opcode);
6048	return -EINVAL;
6049}
6050
Jens Axboedef596e2019-01-09 08:59:42 -07006051static int io_req_defer_prep(struct io_kiocb *req,
6052 const struct io_uring_sqe *sqe)
Jens Axboedef596e2019-01-09 08:59:42 -07006053{
Jens Axboedef596e2019-01-09 08:59:42 -07006054 if (!sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006055 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006056 if (io_alloc_async_data(req))
Jens Axboeb76da702019-11-20 13:05:32 -07006057 return -EAGAIN;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006058 return io_req_prep(req, sqe);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006059}
6060
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006061static u32 io_get_sequence(struct io_kiocb *req)
6062{
6063 struct io_kiocb *pos;
6064 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006065 u32 total_submitted, nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006066
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006067 io_for_each_link(pos, req)
6068 nr_reqs++;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006069
6070 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
6071 return total_submitted - nr_reqs;
6072}
6073
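/*
 * Illustrative worked example of io_get_sequence() (numbers assumed for
 * illustration, not taken from the original source): if the ring has
 * consumed 10 SQEs so far (cached_sq_head advanced by 10), 2 of them were
 * dropped as invalid (cached_sq_dropped == 2), and the request heads a link
 * of 2 requests (nr_reqs == 2), then
 *
 *	total_submitted = 10 - 2 = 8
 *	seq             =  8 - 2 = 6
 *
 * i.e. roughly the number of valid requests submitted before this link
 * started, which is what req_need_defer() is later given to compare against.
 */
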
Jens Axboe3529d8c2019-12-19 18:24:38 -07006074static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006075{
6076 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006077 struct io_defer_entry *de;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006078 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006079 u32 seq;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006080
6081	/* Still need to defer if there is a pending req in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006082 if (likely(list_empty_careful(&ctx->defer_list) &&
6083 !(req->flags & REQ_F_IO_DRAIN)))
6084 return 0;
6085
6086 seq = io_get_sequence(req);
6087 /* Still a chance to pass the sequence check */
6088 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboe2b188cc2019-01-07 10:46:33 -07006089 return 0;
6090
Jens Axboee8c2bc12020-08-15 18:44:09 -07006091 if (!req->async_data) {
Pavel Begunkov650b5482020-05-17 14:02:11 +03006092 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006093 if (ret)
Pavel Begunkov650b5482020-05-17 14:02:11 +03006094 return ret;
6095 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006096 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006097 de = kmalloc(sizeof(*de), GFP_KERNEL);
6098 if (!de)
6099 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07006100
6101 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006102 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07006103 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006104 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03006105 io_queue_async_work(req);
6106 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07006107 }
6108
6109 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006110 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006111 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006112 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07006113 spin_unlock_irq(&ctx->completion_lock);
6114 return -EIOCBQUEUED;
6115}
Jens Axboeedafcce2019-01-09 09:16:05 -07006116
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03006117static void __io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006118{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006119 if (req->flags & REQ_F_BUFFER_SELECTED) {
6120 switch (req->opcode) {
6121 case IORING_OP_READV:
6122 case IORING_OP_READ_FIXED:
6123 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006124 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006125 break;
6126 case IORING_OP_RECVMSG:
6127 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006128 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006129 break;
6130 }
6131 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006132 }
6133
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006134 if (req->flags & REQ_F_NEED_CLEANUP) {
6135 switch (req->opcode) {
6136 case IORING_OP_READV:
6137 case IORING_OP_READ_FIXED:
6138 case IORING_OP_READ:
6139 case IORING_OP_WRITEV:
6140 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006141 case IORING_OP_WRITE: {
6142 struct io_async_rw *io = req->async_data;
6143 if (io->free_iovec)
6144 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006145 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006146 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006147 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006148 case IORING_OP_SENDMSG: {
6149 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006150
6151 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006152 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006153 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006154 case IORING_OP_SPLICE:
6155 case IORING_OP_TEE:
6156 io_put_file(req, req->splice.file_in,
6157 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
6158 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006159 case IORING_OP_OPENAT:
6160 case IORING_OP_OPENAT2:
6161 if (req->open.filename)
6162 putname(req->open.filename);
6163 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006164 case IORING_OP_RENAMEAT:
6165 putname(req->rename.oldpath);
6166 putname(req->rename.newpath);
6167 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006168 case IORING_OP_UNLINKAT:
6169 putname(req->unlink.filename);
6170 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006171 }
6172 req->flags &= ~REQ_F_NEED_CLEANUP;
6173 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006174}
6175
Pavel Begunkov889fca72021-02-10 00:03:09 +00006176static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006177{
Jens Axboeedafcce2019-01-09 09:16:05 -07006178 struct io_ring_ctx *ctx = req->ctx;
Jens Axboed625c6e2019-12-17 19:53:05 -07006179 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006180
Jens Axboed625c6e2019-12-17 19:53:05 -07006181 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006182 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006183 ret = io_nop(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006184 break;
6185 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006186 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006187 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006188 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006189 break;
6190 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006191 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006192 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006193 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006194 break;
6195 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006196 ret = io_fsync(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006197 break;
6198 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006199 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006200 break;
6201 case IORING_OP_POLL_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006202 ret = io_poll_remove(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006203 break;
6204 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006205 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006206 break;
6207 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006208 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006209 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006210 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006211 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006212 break;
6213 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006214 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006215 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006216 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006217 ret = io_recv(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006218 break;
6219 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006220 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006221 break;
6222 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006223 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006224 break;
6225 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006226 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006227 break;
6228 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006229 ret = io_connect(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006230 break;
6231 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006232 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006233 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006234 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006235 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006236 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006237 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006238 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006239 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006240 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006241 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006242 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006243 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006244 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006245 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006246 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006247 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006248 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006249 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006250 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006251 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006252 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006253 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006254 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006255 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006256 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006257 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006258 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006259 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006260 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006261 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006262 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006263 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006264 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006265 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006266 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006267 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006268 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006269 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006270 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006271 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006272 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006273 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006274 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006275 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006276 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006277 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006278 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006279 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006280 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006281 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006282 default:
6283 ret = -EINVAL;
6284 break;
Jens Axboe31b51512019-01-18 22:56:34 -07006285 }
6286
6287 if (ret)
Jens Axboeedafcce2019-01-09 09:16:05 -07006288 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006289
Jens Axboeb5325762020-05-19 21:20:27 -06006290 /* If the op doesn't have a file, we're not polling for it */
6291 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006292 const bool in_async = io_wq_current_is_worker();
6293
Jens Axboe11ba8202020-01-15 21:51:17 -07006294 /* workqueue context doesn't hold uring_lock, grab it now */
6295 if (in_async)
6296 mutex_lock(&ctx->uring_lock);
6297
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08006298 io_iopoll_req_issued(req, in_async);
Jens Axboe11ba8202020-01-15 21:51:17 -07006299
6300 if (in_async)
6301 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006302 }
6303
6304 return 0;
6305}
6306
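/*
 * Note on issue_flags (added sketch, not part of the original source): the
 * inline submission path (__io_queue_sqe() below) passes IO_URING_F_NONBLOCK,
 * so opcode handlers are expected to return -EAGAIN instead of blocking,
 * while the io-wq worker path (io_wq_submit_work()) passes 0 and may block.
 */
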
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006307static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006308{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006309 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006310 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006311 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006312
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006313 timeout = io_prep_linked_timeout(req);
6314 if (timeout)
6315 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006316
Jens Axboe4014d942021-01-19 15:53:54 -07006317 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006318 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006319
Jens Axboe561fb042019-10-24 07:25:42 -06006320 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006321 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006322 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006323 /*
6324 * We can get EAGAIN for polled IO even though we're
6325 * forcing a sync submission from here, since we can't
6326 * wait for request slots on the block side.
6327 */
6328 if (ret != -EAGAIN)
6329 break;
6330 cond_resched();
6331 } while (1);
6332 }
Jens Axboe31b51512019-01-18 22:56:34 -07006333
Jens Axboe561fb042019-10-24 07:25:42 -06006334 if (ret) {
Xiaoguang Wangc07e6712020-12-14 23:49:41 +08006335 struct io_ring_ctx *lock_ctx = NULL;
Xiaoguang Wangdad1b122020-12-06 22:22:42 +00006336
Xiaoguang Wangc07e6712020-12-14 23:49:41 +08006337 if (req->ctx->flags & IORING_SETUP_IOPOLL)
6338 lock_ctx = req->ctx;
6339
6340 /*
6341		 * io_iopoll_complete() does not hold completion_lock to
6342		 * complete polled io, so for polled io we cannot call
6343		 * io_req_complete() directly; otherwise there may be
6344		 * concurrent access to the cqring, defer_list, etc., which
6345		 * is not safe. Since io_iopoll_complete() is always called
6346		 * under uring_lock, we also take uring_lock here to
6347		 * complete polled io.
6348		 */
6349 if (lock_ctx)
6350 mutex_lock(&lock_ctx->uring_lock);
6351
6352 req_set_fail_links(req);
6353 io_req_complete(req, ret);
6354
6355 if (lock_ctx)
6356 mutex_unlock(&lock_ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07006357 }
Jens Axboe31b51512019-01-18 22:56:34 -07006358}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006359
Jens Axboe65e19f52019-10-26 07:20:21 -06006360static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6361 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06006362{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006363 struct fixed_rsrc_table *table;
Jens Axboe65e19f52019-10-26 07:20:21 -06006364
Jens Axboe05f3fb32019-12-09 11:22:50 -07006365 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
Xiaoming Ni84695082020-05-11 19:25:43 +08006366 return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06006367}
6368
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006369static struct file *io_file_get(struct io_submit_state *state,
6370 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006371{
6372 struct io_ring_ctx *ctx = req->ctx;
6373 struct file *file;
6374
6375 if (fixed) {
Pavel Begunkov479f5172020-10-10 18:34:07 +01006376 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006377 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006378 fd = array_index_nospec(fd, ctx->nr_user_files);
6379 file = io_file_from_index(ctx, fd);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00006380 io_set_resource_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006381 } else {
6382 trace_io_uring_file_get(ctx, fd);
6383 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006384 }
6385
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00006386 if (file && unlikely(file->f_op == &io_uring_fops))
6387 io_req_track_inflight(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006388 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006389}
6390
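/*
 * Illustrative note (assumption for clarity, not from the original source):
 * with IOSQE_FIXED_FILE set, sqe->fd is an index into the table registered
 * via IORING_REGISTER_FILES, e.g. fd == 3 resolves through
 * io_file_from_index(ctx, 3); without the flag, fd is an ordinary process
 * file descriptor resolved through __io_file_get().
 */
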
Jens Axboe2665abf2019-11-05 12:40:47 -07006391static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6392{
Jens Axboead8a48a2019-11-15 08:49:11 -07006393 struct io_timeout_data *data = container_of(timer,
6394 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006395 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006396 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006397 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006398
6399 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006400 prev = req->timeout.head;
6401 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006402
6403 /*
6404	 * We don't expect the list to be empty; that will only happen if we
6405 * race with the completion of the linked work.
6406 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006407 if (prev && refcount_inc_not_zero(&prev->refs))
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006408 io_remove_next_linked(prev);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006409 else
6410 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006411 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6412
6413 if (prev) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006414 req_set_fail_links(prev);
Pavel Begunkov014db002020-03-03 21:33:12 +03006415 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006416 io_put_req_deferred(prev, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006417 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006418 io_req_complete_post(req, -ETIME, 0);
6419 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07006420 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006421 return HRTIMER_NORESTART;
6422}
6423
Jens Axboe7271ef32020-08-10 09:55:22 -06006424static void __io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006425{
Jens Axboe76a46e02019-11-10 23:34:16 -07006426 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006427 * If the back reference is NULL, then our linked request finished
6428	 * before we got a chance to set up the timer.
Jens Axboe76a46e02019-11-10 23:34:16 -07006429 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006430 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006431 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006432
Jens Axboead8a48a2019-11-15 08:49:11 -07006433 data->timer.function = io_link_timeout_fn;
6434 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6435 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006436 }
Jens Axboe7271ef32020-08-10 09:55:22 -06006437}
6438
6439static void io_queue_linked_timeout(struct io_kiocb *req)
6440{
6441 struct io_ring_ctx *ctx = req->ctx;
6442
6443 spin_lock_irq(&ctx->completion_lock);
6444 __io_queue_linked_timeout(req);
Jens Axboe76a46e02019-11-10 23:34:16 -07006445 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006446
Jens Axboe2665abf2019-11-05 12:40:47 -07006447 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006448 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006449}
6450
Jens Axboead8a48a2019-11-15 08:49:11 -07006451static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006452{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006453 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006454
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006455 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6456 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006457 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006458
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006459 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006460 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006461 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006462 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006463}
6464
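/*
 * Usage sketch (an illustration, not from the original source): an
 * IORING_OP_LINK_TIMEOUT SQE must directly follow, in the same link chain,
 * the request it guards. io_prep_linked_timeout() points nxt->timeout.head
 * back at that request so io_link_timeout_fn() can find and cancel it if
 * the timer fires before the request completes.
 */
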
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006465static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006466{
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006467 struct io_kiocb *linked_timeout;
Jens Axboe193155c2020-02-22 23:22:19 -07006468 const struct cred *old_creds = NULL;
Pavel Begunkov889fca72021-02-10 00:03:09 +00006469 int ret, issue_flags = IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006470
Pavel Begunkov889fca72021-02-10 00:03:09 +00006471 if (cs)
6472 issue_flags |= IO_URING_F_COMPLETE_DEFER;
Jens Axboe4a0a7a12019-12-09 20:01:01 -07006473again:
6474 linked_timeout = io_prep_linked_timeout(req);
6475
Pavel Begunkov2e5aa6c2020-10-18 10:17:37 +01006476 if ((req->flags & REQ_F_WORK_INITIALIZED) &&
6477 (req->work.flags & IO_WQ_WORK_CREDS) &&
Jens Axboe98447d62020-10-14 10:48:51 -06006478 req->work.identity->creds != current_cred()) {
Jens Axboe193155c2020-02-22 23:22:19 -07006479 if (old_creds)
6480 revert_creds(old_creds);
Jens Axboe98447d62020-10-14 10:48:51 -06006481 if (old_creds == req->work.identity->creds)
Jens Axboe193155c2020-02-22 23:22:19 -07006482 old_creds = NULL; /* restored original creds */
6483 else
Jens Axboe98447d62020-10-14 10:48:51 -06006484 old_creds = override_creds(req->work.identity->creds);
Jens Axboe193155c2020-02-22 23:22:19 -07006485 }
6486
Pavel Begunkov889fca72021-02-10 00:03:09 +00006487 ret = io_issue_sqe(req, issue_flags);
Jens Axboe491381ce2019-10-17 09:20:46 -06006488
6489 /*
6490 * We async punt it if the file wasn't marked NOWAIT, or if the file
6491 * doesn't support non-blocking read/write attempts
6492 */
Pavel Begunkov24c74672020-06-21 13:09:51 +03006493 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006494 if (!io_arm_poll_handler(req)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006495 /*
6496			 * Queued up for async execution; the worker will release
6497			 * the submit reference when the iocb is actually submitted.
6498 */
6499 io_queue_async_work(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006500 }
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03006501
Pavel Begunkovf063c542020-07-25 14:41:59 +03006502 if (linked_timeout)
6503 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006504 } else if (likely(!ret)) {
6505 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006506 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006507 cs->reqs[cs->nr++] = req;
6508 if (cs->nr == IO_COMPL_BATCH)
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006509 io_submit_flush_completions(cs, req->ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006510 req = NULL;
6511 } else {
6512 req = io_put_req_find_next(req);
6513 }
6514
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006515 if (linked_timeout)
6516 io_queue_linked_timeout(linked_timeout);
Jens Axboee65ef562019-03-12 10:16:44 -06006517
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006518 if (req) {
6519 if (!(req->flags & REQ_F_FORCE_ASYNC))
6520 goto again;
6521 io_queue_async_work(req);
6522 }
6523 } else {
Pavel Begunkov652532a2020-07-03 22:15:07 +03006524 /* un-prep timeout, so it'll be killed as any other linked */
6525 req->flags &= ~REQ_F_LINK_TIMEOUT;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006526 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06006527 io_put_req(req);
Pavel Begunkov652532a2020-07-03 22:15:07 +03006528 io_req_complete(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006529 }
Pavel Begunkov652532a2020-07-03 22:15:07 +03006530
Jens Axboe193155c2020-02-22 23:22:19 -07006531 if (old_creds)
6532 revert_creds(old_creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006533}
6534
Jens Axboef13fad72020-06-22 09:34:30 -06006535static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6536 struct io_comp_state *cs)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006537{
6538 int ret;
6539
Jens Axboe3529d8c2019-12-19 18:24:38 -07006540 ret = io_req_defer(req, sqe);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006541 if (ret) {
6542 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006543fail_req:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006544 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006545 io_put_req(req);
6546 io_req_complete(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006547 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006548 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006549 if (!req->async_data) {
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03006550 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006551 if (unlikely(ret))
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03006552 goto fail_req;
6553 }
Jens Axboece35a472019-12-17 08:04:44 -07006554 io_queue_async_work(req);
6555 } else {
Pavel Begunkovc1379e22020-09-30 22:57:56 +03006556 if (sqe) {
6557 ret = io_req_prep(req, sqe);
6558 if (unlikely(ret))
6559 goto fail_req;
6560 }
6561 __io_queue_sqe(req, cs);
Jens Axboece35a472019-12-17 08:04:44 -07006562 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006563}
6564
Jens Axboef13fad72020-06-22 09:34:30 -06006565static inline void io_queue_link_head(struct io_kiocb *req,
6566 struct io_comp_state *cs)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006567{
Jens Axboe94ae5e72019-11-14 19:39:52 -07006568 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
Jens Axboee1e16092020-06-22 09:17:17 -06006569 io_put_req(req);
6570 io_req_complete(req, -ECANCELED);
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006571 } else
Jens Axboef13fad72020-06-22 09:34:30 -06006572 io_queue_sqe(req, NULL, cs);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006573}
6574
Pavel Begunkov863e0562020-10-27 23:25:35 +00006575struct io_submit_link {
6576 struct io_kiocb *head;
6577 struct io_kiocb *last;
6578};
6579
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006580static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Pavel Begunkov863e0562020-10-27 23:25:35 +00006581 struct io_submit_link *link, struct io_comp_state *cs)
Jens Axboe9e645e112019-05-10 16:07:28 -06006582{
Jackie Liua197f662019-11-08 08:09:12 -07006583 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006584 int ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06006585
Jens Axboe9e645e112019-05-10 16:07:28 -06006586 /*
6587 * If we already have a head request, queue this one for async
6588 * submittal once the head completes. If we don't have a head but
6589 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6590 * submitted sync once the chain is complete. If none of those
6591 * conditions are true (normal request), then just queue it.
6592 */
Pavel Begunkov863e0562020-10-27 23:25:35 +00006593 if (link->head) {
6594 struct io_kiocb *head = link->head;
Jens Axboe9e645e112019-05-10 16:07:28 -06006595
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006596 /*
6597		 * Given the sequential execution of a link, draining both sides
6598		 * of the link also fulfills IOSQE_IO_DRAIN semantics for all
6599		 * requests in the link. So it drains the head and the request
6600		 * right after the link. The latter is handled via the drain_next
6601		 * flag, which persists the effect across calls.
6602 */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006603 if (req->flags & REQ_F_IO_DRAIN) {
Pavel Begunkov711be032020-01-17 03:57:59 +03006604 head->flags |= REQ_F_IO_DRAIN;
6605 ctx->drain_next = 1;
6606 }
Jens Axboe3529d8c2019-12-19 18:24:38 -07006607 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006608 if (unlikely(ret)) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006609 /* fail even hard links since we don't submit */
Pavel Begunkov9d763772019-12-17 02:22:07 +03006610 head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006611 return ret;
Jens Axboe2d283902019-12-04 11:08:05 -07006612 }
Pavel Begunkov9d763772019-12-17 02:22:07 +03006613 trace_io_uring_link(ctx, req, head);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006614 link->last->link = req;
Pavel Begunkov863e0562020-10-27 23:25:35 +00006615 link->last = req;
Jens Axboe9e645e112019-05-10 16:07:28 -06006616
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006617 /* last request of a link, enqueue the link */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006618 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Jens Axboef13fad72020-06-22 09:34:30 -06006619 io_queue_link_head(head, cs);
Pavel Begunkov863e0562020-10-27 23:25:35 +00006620 link->head = NULL;
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006621 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006622 } else {
Pavel Begunkov711be032020-01-17 03:57:59 +03006623 if (unlikely(ctx->drain_next)) {
6624 req->flags |= REQ_F_IO_DRAIN;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006625 ctx->drain_next = 0;
Pavel Begunkov711be032020-01-17 03:57:59 +03006626 }
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006627 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov711be032020-01-17 03:57:59 +03006628 ret = io_req_defer_prep(req, sqe);
Pavel Begunkov327d6d92020-07-15 12:46:51 +03006629 if (unlikely(ret))
Pavel Begunkov711be032020-01-17 03:57:59 +03006630 req->flags |= REQ_F_FAIL_LINK;
Pavel Begunkov863e0562020-10-27 23:25:35 +00006631 link->head = req;
6632 link->last = req;
Pavel Begunkov711be032020-01-17 03:57:59 +03006633 } else {
Jens Axboef13fad72020-06-22 09:34:30 -06006634 io_queue_sqe(req, sqe, cs);
Pavel Begunkov711be032020-01-17 03:57:59 +03006635 }
Jens Axboe9e645e112019-05-10 16:07:28 -06006636 }
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006637
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006638 return 0;
Jens Axboe9e645e112019-05-10 16:07:28 -06006639}
6640
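/*
 * Illustrative sketch of the link bookkeeping above (example, not from the
 * original source). For three SQEs submitted as A (IOSQE_IO_LINK),
 * B (IOSQE_IO_LINK), C (no link flag):
 *
 *	A: link->head is NULL, LINK set   -> link->head = link->last = A
 *	B: link->head is A                -> A->link = B, link->last = B
 *	C: link->head is A, no LINK flag  -> B->link = C, then
 *	                                     io_queue_link_head(A), head cleared
 *
 * Only the head is queued immediately; B and C are issued as part of A's
 * completion chain.
 */
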
Jens Axboe9a56a232019-01-09 09:06:50 -07006641/*
6642 * Batched submission is done, ensure local IO is flushed out.
6643 */
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006644static void io_submit_state_end(struct io_submit_state *state,
6645 struct io_ring_ctx *ctx)
Jens Axboe9a56a232019-01-09 09:06:50 -07006646{
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006647 if (state->comp.nr)
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006648 io_submit_flush_completions(&state->comp, ctx);
Jens Axboe27926b62020-10-28 09:33:23 -06006649 if (state->plug_started)
6650 blk_finish_plug(&state->plug);
Pavel Begunkov9f13c352020-05-17 14:13:41 +03006651 io_state_file_put(state);
Pavel Begunkov50872752021-02-10 00:03:12 +00006652 if (state->free_reqs) {
Pavel Begunkov6c8a3132020-02-01 03:58:00 +03006653 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
Pavel Begunkov50872752021-02-10 00:03:12 +00006654 state->free_reqs = 0;
6655 }
Jens Axboe9a56a232019-01-09 09:06:50 -07006656}
6657
6658/*
6659 * Start submission side cache.
6660 */
6661static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006662 unsigned int max_ios)
Jens Axboe9a56a232019-01-09 09:06:50 -07006663{
Jens Axboe27926b62020-10-28 09:33:23 -06006664 state->plug_started = false;
Jens Axboe9a56a232019-01-09 09:06:50 -07006665 state->ios_left = max_ios;
6666}
6667
Jens Axboe2b188cc2019-01-07 10:46:33 -07006668static void io_commit_sqring(struct io_ring_ctx *ctx)
6669{
Hristo Venev75b28af2019-08-26 17:23:46 +00006670 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006671
Pavel Begunkovcaf582c2019-12-30 21:24:46 +03006672 /*
6673 * Ensure any loads from the SQEs are done at this point,
6674 * since once we write the new head, the application could
6675 * write new data to them.
6676 */
6677 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006678}
6679
6680/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006681 * Fetch an sqe, if one is available. Note that the returned sqe points to
Jens Axboe2b188cc2019-01-07 10:46:33 -07006682 * memory that is mapped by userspace. This means that care needs to be taken to
6683 * ensure that reads are stable, as we cannot rely on userspace always
6684 * being a good citizen. If members of the sqe are validated and then later
6685 * used, it's important that those reads are done through READ_ONCE() to
6686 * prevent a re-load down the line.
6687 */
Pavel Begunkov709b3022020-04-08 08:58:43 +03006688static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006689{
Hristo Venev75b28af2019-08-26 17:23:46 +00006690 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006691 unsigned head;
6692
6693 /*
6694 * The cached sq head (or cq tail) serves two purposes:
6695 *
6696	 * 1) allows us to batch the cost of the user-visible head
6697	 *    updates.
6698 * 2) allows the kernel side to track the head on its own, even
6699 * though the application is the one updating it.
6700 */
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006701 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
Pavel Begunkov709b3022020-04-08 08:58:43 +03006702 if (likely(head < ctx->sq_entries))
6703 return &ctx->sq_sqes[head];
Jens Axboe2b188cc2019-01-07 10:46:33 -07006704
6705 /* drop invalid entries */
Jens Axboe498ccd92019-10-25 10:04:25 -06006706 ctx->cached_sq_dropped++;
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006707 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
Pavel Begunkov709b3022020-04-08 08:58:43 +03006708 return NULL;
6709}
6710
6711static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6712{
6713 ctx->cached_sq_head++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006714}
6715
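/*
 * Worked example of the ring indexing above (values assumed for
 * illustration, not from the original source): with sq_entries == 8 the
 * mask is 7, so for cached_sq_head == 13:
 *
 *	slot = 13 & 7;				// slot 5 of sq_array
 *	head = READ_ONCE(sq_array[slot]);	// application-chosen SQE index
 *	sqe  = &ctx->sq_sqes[head];
 *
 * The extra indirection through sq_array[] lets the application fill SQE
 * slots in any order and only publish their indices via the ring.
 */
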
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006716/*
6717 * Check SQE restrictions (opcode and flags).
6718 *
6719 * Returns 'true' if SQE is allowed, 'false' otherwise.
6720 */
6721static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6722 struct io_kiocb *req,
6723 unsigned int sqe_flags)
6724{
6725 if (!ctx->restricted)
6726 return true;
6727
6728 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6729 return false;
6730
6731 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6732 ctx->restrictions.sqe_flags_required)
6733 return false;
6734
6735 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6736 ctx->restrictions.sqe_flags_required))
6737 return false;
6738
6739 return true;
6740}
6741
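/*
 * Illustrative example (flag values assumed, not from the original source):
 * if a registered restriction requires IOSQE_FIXED_FILE and additionally
 * allows IOSQE_ASYNC, then sqe_flags == IOSQE_ASYNC fails the "required"
 * test above, and sqe_flags == (IOSQE_FIXED_FILE | IOSQE_IO_LINK) fails the
 * "allowed" test because IOSQE_IO_LINK is neither allowed nor required.
 */
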
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006742#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
6743 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6744 IOSQE_BUFFER_SELECT)
6745
6746static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006747 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006748{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006749 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006750 unsigned int sqe_flags;
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006751 int id, ret;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006752
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006753 req->opcode = READ_ONCE(sqe->opcode);
6754 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006755 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006756 req->file = NULL;
6757 req->ctx = ctx;
6758 req->flags = 0;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006759 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006760 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006761 /* one is dropped after submission, the other at completion */
6762 refcount_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006763 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006764 req->result = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006765
6766 if (unlikely(req->opcode >= IORING_OP_LAST))
6767 return -EINVAL;
6768
Jens Axboe28cea78a2020-09-14 10:51:17 -06006769 if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
Jens Axboe9d8426a2020-06-16 18:42:49 -06006770 return -EFAULT;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006771
6772 sqe_flags = READ_ONCE(sqe->flags);
6773 /* enforce forwards compatibility on users */
6774 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6775 return -EINVAL;
6776
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006777 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6778 return -EACCES;
6779
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006780 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6781 !io_op_defs[req->opcode].buffer_select)
6782 return -EOPNOTSUPP;
6783
6784 id = READ_ONCE(sqe->personality);
6785 if (id) {
Jens Axboe1e6fa522020-10-15 08:46:24 -06006786 struct io_identity *iod;
6787
Jens Axboe1e6fa522020-10-15 08:46:24 -06006788 iod = idr_find(&ctx->personality_idr, id);
6789 if (unlikely(!iod))
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006790 return -EINVAL;
Jens Axboe1e6fa522020-10-15 08:46:24 -06006791 refcount_inc(&iod->count);
Pavel Begunkovec99ca62020-10-18 10:17:38 +01006792
6793 __io_req_init_async(req);
Jens Axboe1e6fa522020-10-15 08:46:24 -06006794 get_cred(iod->creds);
6795 req->work.identity = iod;
Jens Axboedfead8a2020-10-14 10:12:37 -06006796 req->work.flags |= IO_WQ_WORK_CREDS;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006797 }
6798
6799 /* same numerical values with corresponding REQ_F_*, safe to copy */
Pavel Begunkovc11368a52020-05-17 14:13:42 +03006800 req->flags |= sqe_flags;
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006801 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006802
Jens Axboe27926b62020-10-28 09:33:23 -06006803 /*
6804 * Plug now if we have more than 1 IO left after this, and the target
6805	 * is potentially a read/write to block-based storage.
6806 */
6807 if (!state->plug_started && state->ios_left > 1 &&
6808 io_op_defs[req->opcode].plug) {
6809 blk_start_plug(&state->plug);
6810 state->plug_started = true;
6811 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006812
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006813 ret = 0;
6814 if (io_op_defs[req->opcode].needs_file) {
6815 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006816
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006817 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006818 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006819 ret = -EBADF;
6820 }
6821
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006822 state->ios_left--;
6823 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006824}
6825
Jens Axboe0f212202020-09-13 13:09:39 -06006826static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006827{
Pavel Begunkov863e0562020-10-27 23:25:35 +00006828 struct io_submit_link link;
Jens Axboe9e645e112019-05-10 16:07:28 -06006829 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006830
Jens Axboec4a2ed72019-11-21 21:01:26 -07006831 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006832 if (test_bit(0, &ctx->sq_check_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006833 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006834 return -EBUSY;
6835 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006836
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006837 /* make sure SQ entry isn't read before tail */
6838 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006839
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006840 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6841 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006842
Jens Axboed8a6df12020-10-15 16:24:45 -06006843 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006844 refcount_add(nr, &current->usage);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006845
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006846 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkov863e0562020-10-27 23:25:35 +00006847 link.head = NULL;
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006848
Jens Axboe6c271ce2019-01-10 11:22:30 -07006849 for (i = 0; i < nr; i++) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006850 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006851 struct io_kiocb *req;
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03006852 int err;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006853
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03006854 sqe = io_get_sqe(ctx);
6855 if (unlikely(!sqe)) {
6856 io_consume_sqe(ctx);
6857 break;
6858 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006859 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006860 if (unlikely(!req)) {
6861 if (!submitted)
6862 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006863 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006864 }
Pavel Begunkov709b3022020-04-08 08:58:43 +03006865 io_consume_sqe(ctx);
Jens Axboed3656342019-12-18 09:50:26 -07006866 /* will complete beyond this point, count as submitted */
6867 submitted++;
6868
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006869 err = io_init_req(ctx, req, sqe);
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006870 if (unlikely(err)) {
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03006871fail_req:
Jens Axboee1e16092020-06-22 09:17:17 -06006872 io_put_req(req);
6873 io_req_complete(req, err);
Jens Axboed3656342019-12-18 09:50:26 -07006874 break;
6875 }
6876
Jens Axboe354420f2020-01-08 18:55:15 -07006877 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
Pavel Begunkov2d7e9352021-01-19 13:32:37 +00006878 true, ctx->flags & IORING_SETUP_SQPOLL);
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006879 err = io_submit_sqe(req, sqe, &link, &ctx->submit_state.comp);
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03006880 if (err)
6881 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006882 }
6883
Pavel Begunkov9466f432020-01-25 22:34:01 +03006884 if (unlikely(submitted != nr)) {
6885 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006886 struct io_uring_task *tctx = current->io_uring;
6887 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006888
Jens Axboed8a6df12020-10-15 16:24:45 -06006889 percpu_ref_put_many(&ctx->refs, unused);
6890 percpu_counter_sub(&tctx->inflight, unused);
6891 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006892 }
Pavel Begunkov863e0562020-10-27 23:25:35 +00006893 if (link.head)
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006894 io_queue_link_head(link.head, &ctx->submit_state.comp);
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006895 io_submit_state_end(&ctx->submit_state, ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006896
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006897 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6898 io_commit_sqring(ctx);
6899
Jens Axboe6c271ce2019-01-10 11:22:30 -07006900 return submitted;
6901}
6902
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006903static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6904{
6905 /* Tell userspace we may need a wakeup call */
6906 spin_lock_irq(&ctx->completion_lock);
6907 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6908 spin_unlock_irq(&ctx->completion_lock);
6909}
6910
6911static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6912{
6913 spin_lock_irq(&ctx->completion_lock);
6914 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6915 spin_unlock_irq(&ctx->completion_lock);
6916}
6917
Xiaoguang Wang08369242020-11-03 14:15:59 +08006918static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006919{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006920 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006921 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006922
Jens Axboec8d1ba52020-09-14 11:07:26 -06006923 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006924 /* if we're handling multiple rings, cap submit size for fairness */
6925 if (cap_entries && to_submit > 8)
6926 to_submit = 8;
6927
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006928 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6929 unsigned nr_events = 0;
6930
Xiaoguang Wang08369242020-11-03 14:15:59 +08006931 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006932 if (!list_empty(&ctx->iopoll_list))
6933 io_do_iopoll(ctx, &nr_events, 0);
6934
Pavel Begunkovd9d05212021-01-08 20:57:25 +00006935 if (to_submit && !ctx->sqo_dead &&
6936 likely(!percpu_ref_is_dying(&ctx->refs)))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006937 ret = io_submit_sqes(ctx, to_submit);
6938 mutex_unlock(&ctx->uring_lock);
6939 }
Jens Axboe90554202020-09-03 12:12:41 -06006940
6941 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6942 wake_up(&ctx->sqo_sq_wait);
6943
Xiaoguang Wang08369242020-11-03 14:15:59 +08006944 return ret;
6945}
6946
6947static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6948{
6949 struct io_ring_ctx *ctx;
6950 unsigned sq_thread_idle = 0;
6951
6952 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6953 if (sq_thread_idle < ctx->sq_thread_idle)
6954 sq_thread_idle = ctx->sq_thread_idle;
6955 }
6956
6957 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006958}
6959
Jens Axboe69fb2132020-09-14 11:16:23 -06006960static void io_sqd_init_new(struct io_sq_data *sqd)
6961{
6962 struct io_ring_ctx *ctx;
6963
6964 while (!list_empty(&sqd->ctx_new_list)) {
6965 ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006966 list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6967 complete(&ctx->sq_thread_comp);
6968 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006969
6970 io_sqd_update_thread_idle(sqd);
Jens Axboe69fb2132020-09-14 11:16:23 -06006971}
6972
Jens Axboe6c271ce2019-01-10 11:22:30 -07006973static int io_sq_thread(void *data)
6974{
Dennis Zhou91d8f512020-09-16 13:41:05 -07006975 struct cgroup_subsys_state *cur_css = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006976 struct files_struct *old_files = current->files;
6977 struct nsproxy *old_nsproxy = current->nsproxy;
Jens Axboe69fb2132020-09-14 11:16:23 -06006978 const struct cred *old_cred = NULL;
6979 struct io_sq_data *sqd = data;
6980 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006981 unsigned long timeout = 0;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006982 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006983
Jens Axboe28cea78a2020-09-14 10:51:17 -06006984 task_lock(current);
6985 current->files = NULL;
6986 current->nsproxy = NULL;
6987 task_unlock(current);
6988
Jens Axboe69fb2132020-09-14 11:16:23 -06006989 while (!kthread_should_stop()) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006990 int ret;
6991 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006992
6993 /*
Jens Axboe69fb2132020-09-14 11:16:23 -06006994 * Any changes to the sqd lists are synchronized through the
6995		 * kthread parking. This synchronizes the thread vs users;
6996		 * the users themselves are synchronized on the sqd->ctx_lock.
Jens Axboec1edbf52019-11-10 16:56:04 -07006997 */
Xiaoguang Wang65b2b212020-11-19 17:44:46 +08006998 if (kthread_should_park()) {
Jens Axboe69fb2132020-09-14 11:16:23 -06006999 kthread_parkme();
Xiaoguang Wang65b2b212020-11-19 17:44:46 +08007000 /*
7001			 * When the sq thread is unparked, the previous park operation
7002			 * may have come from io_put_sq_data(), which means the sq
7003			 * thread is about to be stopped, so check for that here.
7004 */
7005 if (kthread_should_stop())
7006 break;
7007 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007008
Xiaoguang Wang08369242020-11-03 14:15:59 +08007009 if (unlikely(!list_empty(&sqd->ctx_new_list))) {
Jens Axboe69fb2132020-09-14 11:16:23 -06007010 io_sqd_init_new(sqd);
Xiaoguang Wang08369242020-11-03 14:15:59 +08007011 timeout = jiffies + sqd->sq_thread_idle;
7012 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007013
Xiaoguang Wang08369242020-11-03 14:15:59 +08007014 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06007015 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06007016 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7017 if (current->cred != ctx->creds) {
7018 if (old_cred)
7019 revert_creds(old_cred);
7020 old_cred = override_creds(ctx->creds);
7021 }
Dennis Zhou91d8f512020-09-16 13:41:05 -07007022 io_sq_thread_associate_blkcg(ctx, &cur_css);
Jens Axboe4ea33a92020-10-15 13:46:44 -06007023#ifdef CONFIG_AUDIT
7024 current->loginuid = ctx->loginuid;
7025 current->sessionid = ctx->sessionid;
7026#endif
Jens Axboe69fb2132020-09-14 11:16:23 -06007027
Xiaoguang Wang08369242020-11-03 14:15:59 +08007028 ret = __io_sq_thread(ctx, cap_entries);
7029 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7030 sqt_spin = true;
Jens Axboe69fb2132020-09-14 11:16:23 -06007031
Jens Axboe28cea78a2020-09-14 10:51:17 -06007032 io_sq_thread_drop_mm_files();
Jens Axboe6c271ce2019-01-10 11:22:30 -07007033 }
7034
Xiaoguang Wang08369242020-11-03 14:15:59 +08007035 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06007036 io_run_task_work();
Pavel Begunkovd434ab62021-01-11 04:00:30 +00007037 io_sq_thread_drop_mm_files();
Jens Axboec8d1ba52020-09-14 11:07:26 -06007038 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08007039 if (sqt_spin)
7040 timeout = jiffies + sqd->sq_thread_idle;
7041 continue;
7042 }
7043
Xiaoguang Wang08369242020-11-03 14:15:59 +08007044 needs_sched = true;
7045 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7046 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7047 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7048 !list_empty_careful(&ctx->iopoll_list)) {
7049 needs_sched = false;
7050 break;
7051 }
7052 if (io_sqring_entries(ctx)) {
7053 needs_sched = false;
7054 break;
7055 }
7056 }
7057
Hao Xu8b28fdf2021-01-31 22:39:04 +08007058 if (needs_sched && !kthread_should_park()) {
Jens Axboe69fb2132020-09-14 11:16:23 -06007059 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7060 io_ring_set_wakeup_flag(ctx);
Xiaoguang Wang08369242020-11-03 14:15:59 +08007061
Jens Axboe69fb2132020-09-14 11:16:23 -06007062 schedule();
Jens Axboe69fb2132020-09-14 11:16:23 -06007063 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7064 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007065 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08007066
7067 finish_wait(&sqd->wait, &wait);
7068 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007069 }
7070
Jens Axboe4c6e2772020-07-01 11:29:10 -06007071 io_run_task_work();
Pavel Begunkovd434ab62021-01-11 04:00:30 +00007072 io_sq_thread_drop_mm_files();
Jens Axboeb41e9852020-02-17 09:52:41 -07007073
Dennis Zhou91d8f512020-09-16 13:41:05 -07007074 if (cur_css)
7075 io_sq_thread_unassociate_blkcg();
Jens Axboe69fb2132020-09-14 11:16:23 -06007076 if (old_cred)
7077 revert_creds(old_cred);
Jens Axboe06058632019-04-13 09:26:03 -06007078
Jens Axboe28cea78a2020-09-14 10:51:17 -06007079 task_lock(current);
7080 current->files = old_files;
7081 current->nsproxy = old_nsproxy;
7082 task_unlock(current);
7083
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02007084 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06007085
Jens Axboe6c271ce2019-01-10 11:22:30 -07007086 return 0;
7087}
7088
Jens Axboebda52162019-09-24 13:47:15 -06007089struct io_wait_queue {
7090 struct wait_queue_entry wq;
7091 struct io_ring_ctx *ctx;
7092 unsigned to_wait;
7093 unsigned nr_timeouts;
7094};
7095
Pavel Begunkov6c503152021-01-04 20:36:36 +00007096static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06007097{
7098 struct io_ring_ctx *ctx = iowq->ctx;
7099
7100 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007101 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007102 * started waiting. For timeouts, we always want to return to userspace,
7103 * regardless of event count.
7104 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00007105 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06007106 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
7107}
7108
7109static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7110 int wake_flags, void *key)
7111{
7112 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7113 wq);
7114
Pavel Begunkov6c503152021-01-04 20:36:36 +00007115 /*
7116	 * Cannot safely flush overflowed CQEs from here; ensure we wake up
7117 * the task, and the next invocation will do it.
7118 */
7119 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
7120 return autoremove_wake_function(curr, mode, wake_flags, key);
7121 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06007122}
7123
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007124static int io_run_task_work_sig(void)
7125{
7126 if (io_run_task_work())
7127 return 1;
7128 if (!signal_pending(current))
7129 return 0;
Jens Axboe792ee0f62020-10-22 20:17:18 -06007130 if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
7131 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007132 return -EINTR;
7133}
7134
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007135/* when this returns >0, the caller should retry */
7136static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7137 struct io_wait_queue *iowq,
7138 signed long *timeout)
7139{
7140 int ret;
7141
7142 /* make sure we run task_work before checking for signals */
7143 ret = io_run_task_work_sig();
7144 if (ret || io_should_wake(iowq))
7145 return ret;
7146 /* let the caller flush overflows, retry */
7147 if (test_bit(0, &ctx->cq_check_overflow))
7148 return 1;
7149
7150 *timeout = schedule_timeout(*timeout);
7151 return !*timeout ? -ETIME : 1;
7152}
7153
Jens Axboe2b188cc2019-01-07 10:46:33 -07007154/*
7155 * Wait until events become available, if we don't already have some. The
7156 * application must reap them itself, as they reside on the shared cq ring.
7157 */
7158static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007159 const sigset_t __user *sig, size_t sigsz,
7160 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007161{
Jens Axboebda52162019-09-24 13:47:15 -06007162 struct io_wait_queue iowq = {
7163 .wq = {
7164 .private = current,
7165 .func = io_wake_function,
7166 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7167 },
7168 .ctx = ctx,
7169 .to_wait = min_events,
7170 };
Hristo Venev75b28af2019-08-26 17:23:46 +00007171 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007172 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7173 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007174
Jens Axboeb41e9852020-02-17 09:52:41 -07007175 do {
Pavel Begunkov6c503152021-01-04 20:36:36 +00007176 io_cqring_overflow_flush(ctx, false, NULL, NULL);
7177 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007178 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007179 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007180 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007181 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007182
7183 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007184#ifdef CONFIG_COMPAT
7185 if (in_compat_syscall())
7186 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007187 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007188 else
7189#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007190 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007191
Jens Axboe2b188cc2019-01-07 10:46:33 -07007192 if (ret)
7193 return ret;
7194 }
7195
Hao Xuc73ebb62020-11-03 10:54:37 +08007196 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007197 struct timespec64 ts;
7198
Hao Xuc73ebb62020-11-03 10:54:37 +08007199 if (get_timespec64(&ts, uts))
7200 return -EFAULT;
7201 timeout = timespec64_to_jiffies(&ts);
7202 }
7203
Jens Axboebda52162019-09-24 13:47:15 -06007204 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007205 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007206 do {
Pavel Begunkov6c503152021-01-04 20:36:36 +00007207 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Jens Axboebda52162019-09-24 13:47:15 -06007208 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7209 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007210 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7211 finish_wait(&ctx->wait, &iowq.wq);
7212 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007213
Jens Axboeb7db41c2020-07-04 08:55:50 -06007214 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007215
Hristo Venev75b28af2019-08-26 17:23:46 +00007216 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007217}
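
/*
 * Illustrative sketch, not part of the kernel sources: a minimal userspace
 * loop that ends up in io_cqring_wait() above when io_uring_enter() is
 * called with IORING_ENTER_GETEVENTS. It assumes liburing's usual helpers;
 * error handling is omitted.
 *
 *	struct io_uring ring;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	// ... prepare and submit SQEs ...
 *	io_uring_submit_and_wait(&ring, 1);		// min_complete == 1
 *	while (io_uring_peek_cqe(&ring, &cqe) == 0) {
 *		// consume cqe->res and cqe->user_data
 *		io_uring_cqe_seen(&ring, cqe);		// advance the CQ head
 *	}
 */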
7218
Jens Axboe6b063142019-01-10 22:13:58 -07007219static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7220{
7221#if defined(CONFIG_UNIX)
7222 if (ctx->ring_sock) {
7223 struct sock *sock = ctx->ring_sock->sk;
7224 struct sk_buff *skb;
7225
7226 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7227 kfree_skb(skb);
7228 }
7229#else
7230 int i;
7231
Jens Axboe65e19f52019-10-26 07:20:21 -06007232 for (i = 0; i < ctx->nr_user_files; i++) {
7233 struct file *file;
7234
7235 file = io_file_from_index(ctx, i);
7236 if (file)
7237 fput(file);
7238 }
Jens Axboe6b063142019-01-10 22:13:58 -07007239#endif
7240}
7241
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007242static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007243{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007244 struct fixed_rsrc_data *data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007245
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007246 data = container_of(ref, struct fixed_rsrc_data, refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007247 complete(&data->done);
7248}
7249
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007250static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
7251{
7252 spin_lock_bh(&ctx->rsrc_ref_lock);
7253}
7254
7255static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
7256{
7257 spin_unlock_bh(&ctx->rsrc_ref_lock);
7258}
7259
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007260static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
7261 struct fixed_rsrc_data *rsrc_data,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007262 struct fixed_rsrc_ref_node *ref_node)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007263{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007264 io_rsrc_ref_lock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007265 rsrc_data->node = ref_node;
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007266 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007267 io_rsrc_ref_unlock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007268 percpu_ref_get(&rsrc_data->refs);
Pavel Begunkov1642b442020-12-30 21:34:14 +00007269}
7270
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007271static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
7272 struct io_ring_ctx *ctx,
7273 struct fixed_rsrc_ref_node *backup_node)
Jens Axboe6b063142019-01-10 22:13:58 -07007274{
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007275 struct fixed_rsrc_ref_node *ref_node;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007276 int ret;
Jens Axboe65e19f52019-10-26 07:20:21 -06007277
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007278 io_rsrc_ref_lock(ctx);
Pavel Begunkov1e5d7702020-11-18 14:56:25 +00007279 ref_node = data->node;
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007280 io_rsrc_ref_unlock(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007281 if (ref_node)
7282 percpu_ref_kill(&ref_node->refs);
7283
7284 percpu_ref_kill(&data->refs);
7285
7286 /* wait for all refs nodes to complete */
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007287 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007288 do {
7289 ret = wait_for_completion_interruptible(&data->done);
7290 if (!ret)
7291 break;
7292 ret = io_run_task_work_sig();
7293 if (ret < 0) {
7294 percpu_ref_resurrect(&data->refs);
7295 reinit_completion(&data->done);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007296 io_sqe_rsrc_set_node(ctx, data, backup_node);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007297 return ret;
7298 }
7299 } while (1);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007300
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007301 destroy_fixed_rsrc_ref_node(backup_node);
7302 return 0;
7303}
7304
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007305static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7306{
7307 struct fixed_rsrc_data *data;
7308
7309 data = kzalloc(sizeof(*data), GFP_KERNEL);
7310 if (!data)
7311 return NULL;
7312
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007313 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007314 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7315 kfree(data);
7316 return NULL;
7317 }
7318 data->ctx = ctx;
7319 init_completion(&data->done);
7320 return data;
7321}
7322
7323static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7324{
7325 percpu_ref_exit(&data->refs);
7326 kfree(data->table);
7327 kfree(data);
7328}
7329
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007330static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7331{
7332 struct fixed_rsrc_data *data = ctx->file_data;
7333 struct fixed_rsrc_ref_node *backup_node;
7334 unsigned nr_tables, i;
7335 int ret;
7336
7337 if (!data)
7338 return -ENXIO;
7339 backup_node = alloc_fixed_rsrc_ref_node(ctx);
7340 if (!backup_node)
7341 return -ENOMEM;
7342 init_fixed_file_ref_node(ctx, backup_node);
7343
7344 ret = io_rsrc_ref_quiesce(data, ctx, backup_node);
7345 if (ret)
7346 return ret;
7347
Jens Axboe6b063142019-01-10 22:13:58 -07007348 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06007349 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7350 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007351 kfree(data->table[i].files);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007352 free_fixed_rsrc_data(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007353 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007354 ctx->nr_user_files = 0;
7355 return 0;
7356}
7357
Jens Axboe534ca6d2020-09-02 13:52:19 -06007358static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007359{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007360 if (refcount_dec_and_test(&sqd->refs)) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02007361 /*
7362	 * The park is a bit of a work-around; without it we get
7363 * warning spews on shutdown with SQPOLL set and affinity
7364 * set to a single CPU.
7365 */
Jens Axboe534ca6d2020-09-02 13:52:19 -06007366 if (sqd->thread) {
7367 kthread_park(sqd->thread);
7368 kthread_stop(sqd->thread);
7369 }
7370
7371 kfree(sqd);
7372 }
7373}
7374
Jens Axboeaa061652020-09-02 14:50:27 -06007375static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7376{
7377 struct io_ring_ctx *ctx_attach;
7378 struct io_sq_data *sqd;
7379 struct fd f;
7380
7381 f = fdget(p->wq_fd);
7382 if (!f.file)
7383 return ERR_PTR(-ENXIO);
7384 if (f.file->f_op != &io_uring_fops) {
7385 fdput(f);
7386 return ERR_PTR(-EINVAL);
7387 }
7388
7389 ctx_attach = f.file->private_data;
7390 sqd = ctx_attach->sq_data;
7391 if (!sqd) {
7392 fdput(f);
7393 return ERR_PTR(-EINVAL);
7394 }
7395
7396 refcount_inc(&sqd->refs);
7397 fdput(f);
7398 return sqd;
7399}
7400
Jens Axboe534ca6d2020-09-02 13:52:19 -06007401static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7402{
7403 struct io_sq_data *sqd;
7404
Jens Axboeaa061652020-09-02 14:50:27 -06007405 if (p->flags & IORING_SETUP_ATTACH_WQ)
7406 return io_attach_sq_data(p);
7407
Jens Axboe534ca6d2020-09-02 13:52:19 -06007408 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7409 if (!sqd)
7410 return ERR_PTR(-ENOMEM);
7411
7412 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007413 INIT_LIST_HEAD(&sqd->ctx_list);
7414 INIT_LIST_HEAD(&sqd->ctx_new_list);
7415 mutex_init(&sqd->ctx_lock);
7416 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007417 init_waitqueue_head(&sqd->wait);
7418 return sqd;
7419}
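
/*
 * Illustrative sketch, not part of the kernel sources: how a second ring
 * attaches to an existing SQPOLL thread via the io_attach_sq_data() path
 * above. ring_fd is assumed to be an already-created IORING_SETUP_SQPOLL
 * ring; error handling is omitted.
 *
 *	struct io_uring_params p = {
 *		.flags	= IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ,
 *		.wq_fd	= ring_fd,	// share its sq thread and io-wq
 *	};
 *
 *	int fd2 = syscall(__NR_io_uring_setup, 64, &p);
 */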
7420
Jens Axboe69fb2132020-09-14 11:16:23 -06007421static void io_sq_thread_unpark(struct io_sq_data *sqd)
7422 __releases(&sqd->lock)
7423{
7424 if (!sqd->thread)
7425 return;
7426 kthread_unpark(sqd->thread);
7427 mutex_unlock(&sqd->lock);
7428}
7429
7430static void io_sq_thread_park(struct io_sq_data *sqd)
7431 __acquires(&sqd->lock)
7432{
7433 if (!sqd->thread)
7434 return;
7435 mutex_lock(&sqd->lock);
7436 kthread_park(sqd->thread);
7437}
7438
Jens Axboe534ca6d2020-09-02 13:52:19 -06007439static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7440{
7441 struct io_sq_data *sqd = ctx->sq_data;
7442
7443 if (sqd) {
7444 if (sqd->thread) {
7445 /*
7446 * We may arrive here from the error branch in
7447 * io_sq_offload_create() where the kthread is created
7448	 * without being woken up, thus wake it up now to make
7449 * sure the wait will complete.
7450 */
7451 wake_up_process(sqd->thread);
7452 wait_for_completion(&ctx->sq_thread_comp);
Jens Axboe69fb2132020-09-14 11:16:23 -06007453
7454 io_sq_thread_park(sqd);
7455 }
7456
7457 mutex_lock(&sqd->ctx_lock);
7458 list_del(&ctx->sqd_list);
Xiaoguang Wang08369242020-11-03 14:15:59 +08007459 io_sqd_update_thread_idle(sqd);
Jens Axboe69fb2132020-09-14 11:16:23 -06007460 mutex_unlock(&sqd->ctx_lock);
7461
Xiaoguang Wang08369242020-11-03 14:15:59 +08007462 if (sqd->thread)
Jens Axboe69fb2132020-09-14 11:16:23 -06007463 io_sq_thread_unpark(sqd);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007464
7465 io_put_sq_data(sqd);
7466 ctx->sq_data = NULL;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007467 }
7468}
7469
Jens Axboe6b063142019-01-10 22:13:58 -07007470static void io_finish_async(struct io_ring_ctx *ctx)
7471{
Jens Axboe6c271ce2019-01-10 11:22:30 -07007472 io_sq_thread_stop(ctx);
7473
Jens Axboe561fb042019-10-24 07:25:42 -06007474 if (ctx->io_wq) {
7475 io_wq_destroy(ctx->io_wq);
7476 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007477 }
7478}
7479
7480#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007481/*
7482 * Ensure the UNIX gc is aware of our file set, so we are certain that
7483 * the io_uring can be safely unregistered on process exit, even if we have
7484 * loops in the file referencing.
7485 */
7486static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7487{
7488 struct sock *sk = ctx->ring_sock->sk;
7489 struct scm_fp_list *fpl;
7490 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007491 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007492
Jens Axboe6b063142019-01-10 22:13:58 -07007493 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7494 if (!fpl)
7495 return -ENOMEM;
7496
7497 skb = alloc_skb(0, GFP_KERNEL);
7498 if (!skb) {
7499 kfree(fpl);
7500 return -ENOMEM;
7501 }
7502
7503 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007504
Jens Axboe08a45172019-10-03 08:11:03 -06007505 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07007506 fpl->user = get_uid(ctx->user);
7507 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007508 struct file *file = io_file_from_index(ctx, i + offset);
7509
7510 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007511 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007512 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007513 unix_inflight(fpl->user, fpl->fp[nr_files]);
7514 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007515 }
7516
Jens Axboe08a45172019-10-03 08:11:03 -06007517 if (nr_files) {
7518 fpl->max = SCM_MAX_FD;
7519 fpl->count = nr_files;
7520 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007521 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007522 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7523 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007524
Jens Axboe08a45172019-10-03 08:11:03 -06007525 for (i = 0; i < nr_files; i++)
7526 fput(fpl->fp[i]);
7527 } else {
7528 kfree_skb(skb);
7529 kfree(fpl);
7530 }
Jens Axboe6b063142019-01-10 22:13:58 -07007531
7532 return 0;
7533}
7534
7535/*
7536 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7537 * causes regular reference counting to break down. We rely on the UNIX
7538 * garbage collection to take care of this problem for us.
7539 */
7540static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7541{
7542 unsigned left, total;
7543 int ret = 0;
7544
7545 total = 0;
7546 left = ctx->nr_user_files;
7547 while (left) {
7548 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007549
7550 ret = __io_sqe_files_scm(ctx, this_files, total);
7551 if (ret)
7552 break;
7553 left -= this_files;
7554 total += this_files;
7555 }
7556
7557 if (!ret)
7558 return 0;
7559
7560 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007561 struct file *file = io_file_from_index(ctx, total);
7562
7563 if (file)
7564 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007565 total++;
7566 }
7567
7568 return ret;
7569}
7570#else
7571static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7572{
7573 return 0;
7574}
7575#endif
7576
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007577static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007578 unsigned nr_tables, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007579{
7580 int i;
7581
7582 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007583 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007584 unsigned this_files;
7585
7586 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7587 table->files = kcalloc(this_files, sizeof(struct file *),
7588 GFP_KERNEL);
7589 if (!table->files)
7590 break;
7591 nr_files -= this_files;
7592 }
7593
7594 if (i == nr_tables)
7595 return 0;
7596
7597 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007598 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007599 kfree(table->files);
7600 }
7601 return 1;
7602}
7603
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007604static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007605{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007606 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007607#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007608 struct sock *sock = ctx->ring_sock->sk;
7609 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7610 struct sk_buff *skb;
7611 int i;
7612
7613 __skb_queue_head_init(&list);
7614
7615 /*
7616 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7617 * remove this entry and rearrange the file array.
7618 */
7619 skb = skb_dequeue(head);
7620 while (skb) {
7621 struct scm_fp_list *fp;
7622
7623 fp = UNIXCB(skb).fp;
7624 for (i = 0; i < fp->count; i++) {
7625 int left;
7626
7627 if (fp->fp[i] != file)
7628 continue;
7629
7630 unix_notinflight(fp->user, fp->fp[i]);
7631 left = fp->count - 1 - i;
7632 if (left) {
7633 memmove(&fp->fp[i], &fp->fp[i + 1],
7634 left * sizeof(struct file *));
7635 }
7636 fp->count--;
7637 if (!fp->count) {
7638 kfree_skb(skb);
7639 skb = NULL;
7640 } else {
7641 __skb_queue_tail(&list, skb);
7642 }
7643 fput(file);
7644 file = NULL;
7645 break;
7646 }
7647
7648 if (!file)
7649 break;
7650
7651 __skb_queue_tail(&list, skb);
7652
7653 skb = skb_dequeue(head);
7654 }
7655
7656 if (skb_peek(&list)) {
7657 spin_lock_irq(&head->lock);
7658 while ((skb = __skb_dequeue(&list)) != NULL)
7659 __skb_queue_tail(head, skb);
7660 spin_unlock_irq(&head->lock);
7661 }
7662#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007663 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007664#endif
7665}
7666
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007667static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007668{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007669 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7670 struct io_ring_ctx *ctx = rsrc_data->ctx;
7671 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007672
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007673 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7674 list_del(&prsrc->list);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007675 ref_node->rsrc_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007676 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007677 }
7678
Xiaoguang Wang05589552020-03-31 14:05:18 +08007679 percpu_ref_exit(&ref_node->refs);
7680 kfree(ref_node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007681 percpu_ref_put(&rsrc_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007682}
7683
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007684static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007685{
7686 struct io_ring_ctx *ctx;
7687 struct llist_node *node;
7688
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007689 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7690 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007691
7692 while (node) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007693 struct fixed_rsrc_ref_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007694 struct llist_node *next = node->next;
7695
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007696 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7697 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007698 node = next;
7699 }
7700}
7701
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007702static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
7703 unsigned i)
7704{
7705 struct fixed_rsrc_table *table;
7706
7707 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7708 return &table->files[i & IORING_FILE_TABLE_MASK];
7709}
7710
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007711static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007712{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007713 struct fixed_rsrc_ref_node *ref_node;
7714 struct fixed_rsrc_data *data;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007715 struct io_ring_ctx *ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007716 bool first_add = false;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007717 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007718
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007719 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7720 data = ref_node->rsrc_data;
Pavel Begunkove2978222020-11-18 14:56:26 +00007721 ctx = data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007722
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007723 io_rsrc_ref_lock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007724 ref_node->done = true;
7725
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007726 while (!list_empty(&ctx->rsrc_ref_list)) {
7727 ref_node = list_first_entry(&ctx->rsrc_ref_list,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007728 struct fixed_rsrc_ref_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007729 /* recycle ref nodes in order */
7730 if (!ref_node->done)
7731 break;
7732 list_del(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007733 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007734 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007735 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007736
7737 if (percpu_ref_is_dying(&data->refs))
Jens Axboe4a38aed22020-05-14 17:21:15 -06007738 delay = 0;
7739
Jens Axboe4a38aed22020-05-14 17:21:15 -06007740 if (!delay)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007741 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007742 else if (first_add)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007743 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007744}
7745
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007746static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
Xiaoguang Wang05589552020-03-31 14:05:18 +08007747 struct io_ring_ctx *ctx)
7748{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007749 struct fixed_rsrc_ref_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007750
7751 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7752 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007753 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007754
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007755 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007756 0, GFP_KERNEL)) {
7757 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007758 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007759 }
7760 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007761 INIT_LIST_HEAD(&ref_node->rsrc_list);
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007762 ref_node->done = false;
7763 return ref_node;
7764}
7765
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007766static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7767 struct fixed_rsrc_ref_node *ref_node)
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007768{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007769 ref_node->rsrc_data = ctx->file_data;
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007770 ref_node->rsrc_put = io_ring_file_put;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007771}
7772
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007773static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007774{
7775 percpu_ref_exit(&ref_node->refs);
7776 kfree(ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007777}
7778
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007779
Jens Axboe05f3fb32019-12-09 11:22:50 -07007780static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7781 unsigned nr_args)
7782{
7783 __s32 __user *fds = (__s32 __user *) arg;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007784 unsigned nr_tables, i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007785 struct file *file;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007786 int fd, ret = -ENOMEM;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007787 struct fixed_rsrc_ref_node *ref_node;
7788 struct fixed_rsrc_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007789
7790 if (ctx->file_data)
7791 return -EBUSY;
7792 if (!nr_args)
7793 return -EINVAL;
7794 if (nr_args > IORING_MAX_FIXED_FILES)
7795 return -EMFILE;
7796
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007797 file_data = alloc_fixed_rsrc_data(ctx);
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007798 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007799 return -ENOMEM;
Dan Carpenter13770a72021-02-01 15:23:42 +03007800 ctx->file_data = file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007801
7802 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
Colin Ian King035fbaf2020-10-12 15:03:41 +01007803 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007804 GFP_KERNEL);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007805 if (!file_data->table)
7806 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007807
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007808 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007809 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007810
7811 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007812 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7813 ret = -EFAULT;
7814 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007815 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007816 /* allow sparse sets */
7817 if (fd == -1)
7818 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007819
Jens Axboe05f3fb32019-12-09 11:22:50 -07007820 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007821 ret = -EBADF;
7822 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007823 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007824
7825 /*
7826 * Don't allow io_uring instances to be registered. If UNIX
7827 * isn't enabled, then this causes a reference cycle and this
7828 * instance can never get freed. If UNIX is enabled we'll
7829 * handle it just fine, but there's still no point in allowing
7830 * a ring fd as it doesn't support regular read/write anyway.
7831 */
7832 if (file->f_op == &io_uring_fops) {
7833 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007834 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007835 }
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007836 *io_fixed_file_slot(file_data, i) = file;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007837 }
7838
Jens Axboe05f3fb32019-12-09 11:22:50 -07007839 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007840 if (ret) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07007841 io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007842 return ret;
7843 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007844
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007845 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007846 if (!ref_node) {
Xiaoguang Wang05589552020-03-31 14:05:18 +08007847 io_sqe_files_unregister(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007848 return -ENOMEM;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007849 }
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007850 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007851
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007852 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007853 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007854out_fput:
7855 for (i = 0; i < ctx->nr_user_files; i++) {
7856 file = io_file_from_index(ctx, i);
7857 if (file)
7858 fput(file);
7859 }
7860 for (i = 0; i < nr_tables; i++)
7861 kfree(file_data->table[i].files);
7862 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007863out_free:
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007864 free_fixed_rsrc_data(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007865 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007866 return ret;
7867}
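
/*
 * Illustrative sketch, not part of the kernel sources: registering a fixed
 * file table from userspace. A -1 entry creates one of the sparse slots
 * accepted above; it can be filled in later with IORING_REGISTER_FILES_UPDATE.
 * Error handling is omitted.
 *
 *	int fds[4] = { sock_fd, file_fd, -1, -1 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES, fds, 4);
 */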
7868
Jens Axboec3a31e62019-10-03 13:59:56 -06007869static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7870 int index)
7871{
7872#if defined(CONFIG_UNIX)
7873 struct sock *sock = ctx->ring_sock->sk;
7874 struct sk_buff_head *head = &sock->sk_receive_queue;
7875 struct sk_buff *skb;
7876
7877 /*
7878 * See if we can merge this file into an existing skb SCM_RIGHTS
7879 * file set. If there's no room, fall back to allocating a new skb
7880 * and filling it in.
7881 */
7882 spin_lock_irq(&head->lock);
7883 skb = skb_peek(head);
7884 if (skb) {
7885 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7886
7887 if (fpl->count < SCM_MAX_FD) {
7888 __skb_unlink(skb, head);
7889 spin_unlock_irq(&head->lock);
7890 fpl->fp[fpl->count] = get_file(file);
7891 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7892 fpl->count++;
7893 spin_lock_irq(&head->lock);
7894 __skb_queue_head(head, skb);
7895 } else {
7896 skb = NULL;
7897 }
7898 }
7899 spin_unlock_irq(&head->lock);
7900
7901 if (skb) {
7902 fput(file);
7903 return 0;
7904 }
7905
7906 return __io_sqe_files_scm(ctx, 1, index);
7907#else
7908 return 0;
7909#endif
7910}
7911
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007912static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007913{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007914 struct io_rsrc_put *prsrc;
7915 struct fixed_rsrc_ref_node *ref_node = data->node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007916
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007917 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7918 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007919 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007920
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007921 prsrc->rsrc = rsrc;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007922 list_add(&prsrc->list, &ref_node->rsrc_list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007923
Hillf Dantona5318d32020-03-23 17:47:15 +08007924 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007925}
7926
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007927static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7928 struct file *file)
7929{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007930 return io_queue_rsrc_removal(data, (void *)file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007931}
7932
Jens Axboe05f3fb32019-12-09 11:22:50 -07007933static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007934 struct io_uring_rsrc_update *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007935 unsigned nr_args)
7936{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007937 struct fixed_rsrc_data *data = ctx->file_data;
7938 struct fixed_rsrc_ref_node *ref_node;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007939 struct file *file, **file_slot;
Jens Axboec3a31e62019-10-03 13:59:56 -06007940 __s32 __user *fds;
7941 int fd, i, err;
7942 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007943 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007944
Jens Axboe05f3fb32019-12-09 11:22:50 -07007945 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007946 return -EOVERFLOW;
7947 if (done > ctx->nr_user_files)
7948 return -EINVAL;
7949
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007950 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007951 if (!ref_node)
7952 return -ENOMEM;
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007953 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007954
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007955 fds = u64_to_user_ptr(up->data);
Pavel Begunkov67973b92021-01-26 13:51:09 +00007956 for (done = 0; done < nr_args; done++) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007957 err = 0;
7958 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7959 err = -EFAULT;
7960 break;
7961 }
noah4e0377a2021-01-26 15:23:28 -05007962 if (fd == IORING_REGISTER_FILES_SKIP)
7963 continue;
7964
Pavel Begunkov67973b92021-01-26 13:51:09 +00007965 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007966 file_slot = io_fixed_file_slot(ctx->file_data, i);
7967
7968 if (*file_slot) {
7969 err = io_queue_file_removal(data, *file_slot);
Hillf Dantona5318d32020-03-23 17:47:15 +08007970 if (err)
7971 break;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007972 *file_slot = NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007973 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007974 }
7975 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007976 file = fget(fd);
7977 if (!file) {
7978 err = -EBADF;
7979 break;
7980 }
7981 /*
7982 * Don't allow io_uring instances to be registered. If
7983 * UNIX isn't enabled, then this causes a reference
7984 * cycle and this instance can never get freed. If UNIX
7985 * is enabled we'll handle it just fine, but there's
7986 * still no point in allowing a ring fd as it doesn't
7987 * support regular read/write anyway.
7988 */
7989 if (file->f_op == &io_uring_fops) {
7990 fput(file);
7991 err = -EBADF;
7992 break;
7993 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007994 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007995 if (err) {
7996 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007997 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007998 }
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007999 *file_slot = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06008000 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008001 }
8002
Xiaoguang Wang05589552020-03-31 14:05:18 +08008003 if (needs_switch) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01008004 percpu_ref_kill(&data->node->refs);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00008005 io_sqe_rsrc_set_node(ctx, data, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008006 } else
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008007 destroy_fixed_rsrc_ref_node(ref_node);
Jens Axboec3a31e62019-10-03 13:59:56 -06008008
8009 return done ? done : err;
8010}
Xiaoguang Wang05589552020-03-31 14:05:18 +08008011
Jens Axboe05f3fb32019-12-09 11:22:50 -07008012static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
8013 unsigned nr_args)
8014{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008015 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008016
8017 if (!ctx->file_data)
8018 return -ENXIO;
8019 if (!nr_args)
8020 return -EINVAL;
8021 if (copy_from_user(&up, arg, sizeof(up)))
8022 return -EFAULT;
8023 if (up.resv)
8024 return -EINVAL;
8025
8026 return __io_sqe_files_update(ctx, &up, nr_args);
8027}
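
/*
 * Illustrative sketch, not part of the kernel sources: updating two slots of
 * a registered file table. IORING_REGISTER_FILES_SKIP (handled above) leaves
 * a slot untouched, -1 clears it, and any other fd replaces it. The layout
 * matches struct io_uring_files_update from the uapi header; error handling
 * is omitted.
 *
 *	int new_fds[2] = { new_fd, IORING_REGISTER_FILES_SKIP };
 *	struct io_uring_files_update up = {
 *		.offset	= 3,				// first slot to update
 *		.fds	= (__u64)(unsigned long) new_fds,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, 2);
 */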
Jens Axboec3a31e62019-10-03 13:59:56 -06008028
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00008029static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07008030{
8031 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8032
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00008033 req = io_put_req_find_next(req);
8034 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07008035}
8036
Pavel Begunkov24369c22020-01-28 03:15:48 +03008037static int io_init_wq_offload(struct io_ring_ctx *ctx,
8038 struct io_uring_params *p)
8039{
8040 struct io_wq_data data;
8041 struct fd f;
8042 struct io_ring_ctx *ctx_attach;
8043 unsigned int concurrency;
8044 int ret = 0;
8045
8046 data.user = ctx->user;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03008047 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03008048 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008049
8050 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
8051 /* Do QD, or 4 * CPUS, whatever is smallest */
8052 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
8053
8054 ctx->io_wq = io_wq_create(concurrency, &data);
8055 if (IS_ERR(ctx->io_wq)) {
8056 ret = PTR_ERR(ctx->io_wq);
8057 ctx->io_wq = NULL;
8058 }
8059 return ret;
8060 }
8061
8062 f = fdget(p->wq_fd);
8063 if (!f.file)
8064 return -EBADF;
8065
8066 if (f.file->f_op != &io_uring_fops) {
8067 ret = -EINVAL;
8068 goto out_fput;
8069 }
8070
8071 ctx_attach = f.file->private_data;
8072 /* @io_wq is protected by holding the fd */
8073 if (!io_wq_get(ctx_attach->io_wq, &data)) {
8074 ret = -EINVAL;
8075 goto out_fput;
8076 }
8077
8078 ctx->io_wq = ctx_attach->io_wq;
8079out_fput:
8080 fdput(f);
8081 return ret;
8082}
8083
Jens Axboe0f212202020-09-13 13:09:39 -06008084static int io_uring_alloc_task_context(struct task_struct *task)
8085{
8086 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06008087 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06008088
8089 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
8090 if (unlikely(!tctx))
8091 return -ENOMEM;
8092
Jens Axboed8a6df12020-10-15 16:24:45 -06008093 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8094 if (unlikely(ret)) {
8095 kfree(tctx);
8096 return ret;
8097 }
8098
Jens Axboe0f212202020-09-13 13:09:39 -06008099 xa_init(&tctx->xa);
8100 init_waitqueue_head(&tctx->wait);
8101 tctx->last = NULL;
Jens Axboefdaf0832020-10-30 09:37:30 -06008102 atomic_set(&tctx->in_idle, 0);
8103 tctx->sqpoll = false;
Jens Axboe500a3732020-10-15 17:38:03 -06008104 io_init_identity(&tctx->__identity);
8105 tctx->identity = &tctx->__identity;
Jens Axboe0f212202020-09-13 13:09:39 -06008106 task->io_uring = tctx;
8107 return 0;
8108}
8109
8110void __io_uring_free(struct task_struct *tsk)
8111{
8112 struct io_uring_task *tctx = tsk->io_uring;
8113
8114 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Jens Axboe500a3732020-10-15 17:38:03 -06008115 WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
8116 if (tctx->identity != &tctx->__identity)
8117 kfree(tctx->identity);
Jens Axboed8a6df12020-10-15 16:24:45 -06008118 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008119 kfree(tctx);
8120 tsk->io_uring = NULL;
8121}
8122
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008123static int io_sq_offload_create(struct io_ring_ctx *ctx,
8124 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008125{
8126 int ret;
8127
Jens Axboe6c271ce2019-01-10 11:22:30 -07008128 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe534ca6d2020-09-02 13:52:19 -06008129 struct io_sq_data *sqd;
8130
Jens Axboe3ec482d2019-04-08 10:51:01 -06008131 ret = -EPERM;
Jens Axboece59fc62020-09-02 13:28:09 -06008132 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
Jens Axboe3ec482d2019-04-08 10:51:01 -06008133 goto err;
8134
Jens Axboe534ca6d2020-09-02 13:52:19 -06008135 sqd = io_get_sq_data(p);
8136 if (IS_ERR(sqd)) {
8137 ret = PTR_ERR(sqd);
8138 goto err;
8139 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008140
Jens Axboe534ca6d2020-09-02 13:52:19 -06008141 ctx->sq_data = sqd;
Jens Axboe69fb2132020-09-14 11:16:23 -06008142 io_sq_thread_park(sqd);
8143 mutex_lock(&sqd->ctx_lock);
8144 list_add(&ctx->sqd_list, &sqd->ctx_new_list);
8145 mutex_unlock(&sqd->ctx_lock);
8146 io_sq_thread_unpark(sqd);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008147
Jens Axboe917257d2019-04-13 09:28:55 -06008148 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8149 if (!ctx->sq_thread_idle)
8150 ctx->sq_thread_idle = HZ;
8151
Jens Axboeaa061652020-09-02 14:50:27 -06008152 if (sqd->thread)
8153 goto done;
8154
Jens Axboe6c271ce2019-01-10 11:22:30 -07008155 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008156 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008157
Jens Axboe917257d2019-04-13 09:28:55 -06008158 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06008159 if (cpu >= nr_cpu_ids)
8160 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08008161 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06008162 goto err;
8163
Jens Axboe69fb2132020-09-14 11:16:23 -06008164 sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
Jens Axboe534ca6d2020-09-02 13:52:19 -06008165 cpu, "io_uring-sq");
Jens Axboe6c271ce2019-01-10 11:22:30 -07008166 } else {
Jens Axboe69fb2132020-09-14 11:16:23 -06008167 sqd->thread = kthread_create(io_sq_thread, sqd,
Jens Axboe6c271ce2019-01-10 11:22:30 -07008168 "io_uring-sq");
8169 }
Jens Axboe534ca6d2020-09-02 13:52:19 -06008170 if (IS_ERR(sqd->thread)) {
8171 ret = PTR_ERR(sqd->thread);
8172 sqd->thread = NULL;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008173 goto err;
8174 }
Jens Axboe534ca6d2020-09-02 13:52:19 -06008175 ret = io_uring_alloc_task_context(sqd->thread);
Jens Axboe0f212202020-09-13 13:09:39 -06008176 if (ret)
8177 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008178 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8179 /* Can't have SQ_AFF without SQPOLL */
8180 ret = -EINVAL;
8181 goto err;
8182 }
8183
Jens Axboeaa061652020-09-02 14:50:27 -06008184done:
Pavel Begunkov24369c22020-01-28 03:15:48 +03008185 ret = io_init_wq_offload(ctx, p);
8186 if (ret)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008187 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008188
8189 return 0;
8190err:
Jens Axboe54a91f32019-09-10 09:15:04 -06008191 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008192 return ret;
8193}
8194
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008195static void io_sq_offload_start(struct io_ring_ctx *ctx)
8196{
Jens Axboe534ca6d2020-09-02 13:52:19 -06008197 struct io_sq_data *sqd = ctx->sq_data;
8198
8199 if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
8200 wake_up_process(sqd->thread);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008201}
8202
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008203static inline void __io_unaccount_mem(struct user_struct *user,
8204 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008205{
8206 atomic_long_sub(nr_pages, &user->locked_vm);
8207}
8208
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008209static inline int __io_account_mem(struct user_struct *user,
8210 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008211{
8212 unsigned long page_limit, cur_pages, new_pages;
8213
8214 /* Don't allow more pages than we can safely lock */
8215 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8216
8217 do {
8218 cur_pages = atomic_long_read(&user->locked_vm);
8219 new_pages = cur_pages + nr_pages;
8220 if (new_pages > page_limit)
8221 return -ENOMEM;
8222 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8223 new_pages) != cur_pages);
8224
8225 return 0;
8226}
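
/*
 * Illustrative example, not part of the kernel sources: with a typical
 * RLIMIT_MEMLOCK soft limit of 64 KiB and 4 KiB pages, page_limit above is
 * 16, so once 16 pages are accounted any further __io_account_mem() call
 * fails with -ENOMEM until something is unregistered or the rlimit is
 * raised (e.g. via "ulimit -l").
 */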
8227
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008228static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8229 enum io_mem_account acct)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008230{
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07008231 if (ctx->limit_mem)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008232 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008233
Jens Axboe2aede0e2020-09-14 10:45:53 -06008234 if (ctx->mm_account) {
Jens Axboe4bc4a912020-12-17 07:53:33 -07008235 if (acct == ACCT_LOCKED) {
8236 mmap_write_lock(ctx->mm_account);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008237 ctx->mm_account->locked_vm -= nr_pages;
Jens Axboe4bc4a912020-12-17 07:53:33 -07008238 mmap_write_unlock(ctx->mm_account);
8239 }else if (acct == ACCT_PINNED) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008240 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Jens Axboe4bc4a912020-12-17 07:53:33 -07008241 }
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008242 }
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008243}
8244
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008245static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8246 enum io_mem_account acct)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008247{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008248 int ret;
8249
8250 if (ctx->limit_mem) {
8251 ret = __io_account_mem(ctx->user, nr_pages);
8252 if (ret)
8253 return ret;
8254 }
8255
Jens Axboe2aede0e2020-09-14 10:45:53 -06008256 if (ctx->mm_account) {
Jens Axboe4bc4a912020-12-17 07:53:33 -07008257 if (acct == ACCT_LOCKED) {
8258 mmap_write_lock(ctx->mm_account);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008259 ctx->mm_account->locked_vm += nr_pages;
Jens Axboe4bc4a912020-12-17 07:53:33 -07008260 mmap_write_unlock(ctx->mm_account);
8261 } else if (acct == ACCT_PINNED) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008262 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Jens Axboe4bc4a912020-12-17 07:53:33 -07008263 }
Bijan Mottahedeh2e0464d2020-06-16 16:36:10 -07008264 }
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008265
8266 return 0;
8267}
8268
Jens Axboe2b188cc2019-01-07 10:46:33 -07008269static void io_mem_free(void *ptr)
8270{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008271 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008272
Mark Rutland52e04ef2019-04-30 17:30:21 +01008273 if (!ptr)
8274 return;
8275
8276 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008277 if (put_page_testzero(page))
8278 free_compound_page(page);
8279}
8280
8281static void *io_mem_alloc(size_t size)
8282{
8283 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8284 __GFP_NORETRY;
8285
8286 return (void *) __get_free_pages(gfp_flags, get_order(size));
8287}
8288
Hristo Venev75b28af2019-08-26 17:23:46 +00008289static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8290 size_t *sq_offset)
8291{
8292 struct io_rings *rings;
8293 size_t off, sq_array_size;
8294
8295 off = struct_size(rings, cqes, cq_entries);
8296 if (off == SIZE_MAX)
8297 return SIZE_MAX;
8298
8299#ifdef CONFIG_SMP
8300 off = ALIGN(off, SMP_CACHE_BYTES);
8301 if (off == 0)
8302 return SIZE_MAX;
8303#endif
8304
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008305 if (sq_offset)
8306 *sq_offset = off;
8307
Hristo Venev75b28af2019-08-26 17:23:46 +00008308 sq_array_size = array_size(sizeof(u32), sq_entries);
8309 if (sq_array_size == SIZE_MAX)
8310 return SIZE_MAX;
8311
8312 if (check_add_overflow(off, sq_array_size, &off))
8313 return SIZE_MAX;
8314
Hristo Venev75b28af2019-08-26 17:23:46 +00008315 return off;
8316}
8317
Jens Axboe2b188cc2019-01-07 10:46:33 -07008318static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
8319{
Hristo Venev75b28af2019-08-26 17:23:46 +00008320 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008321
Hristo Venev75b28af2019-08-26 17:23:46 +00008322 pages = (size_t)1 << get_order(
8323 rings_size(sq_entries, cq_entries, NULL));
8324 pages += (size_t)1 << get_order(
8325 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008326
Hristo Venev75b28af2019-08-26 17:23:46 +00008327 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008328}
8329
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008330static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008331{
8332 int i, j;
8333
8334 if (!ctx->user_bufs)
8335 return -ENXIO;
8336
8337 for (i = 0; i < ctx->nr_user_bufs; i++) {
8338 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8339
8340 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008341 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07008342
Jens Axboede293932020-09-17 16:19:16 -06008343 if (imu->acct_pages)
8344 io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008345 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008346 imu->nr_bvecs = 0;
8347 }
8348
8349 kfree(ctx->user_bufs);
8350 ctx->user_bufs = NULL;
8351 ctx->nr_user_bufs = 0;
8352 return 0;
8353}
8354
8355static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8356 void __user *arg, unsigned index)
8357{
8358 struct iovec __user *src;
8359
8360#ifdef CONFIG_COMPAT
8361 if (ctx->compat) {
8362 struct compat_iovec __user *ciovs;
8363 struct compat_iovec ciov;
8364
8365 ciovs = (struct compat_iovec __user *) arg;
8366 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8367 return -EFAULT;
8368
Jens Axboed55e5f52019-12-11 16:12:15 -07008369 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008370 dst->iov_len = ciov.iov_len;
8371 return 0;
8372 }
8373#endif
8374 src = (struct iovec __user *) arg;
8375 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8376 return -EFAULT;
8377 return 0;
8378}
8379
Jens Axboede293932020-09-17 16:19:16 -06008380/*
8381 * Not super efficient, but this is just a registration time. And we do cache
8382 * the last compound head, so generally we'll only do a full search if we don't
8383 * match that one.
8384 *
8385 * We check if the given compound head page has already been accounted, to
8386 * avoid double accounting it. This allows us to account the full size of the
8387 * page, not just the constituent pages of a huge page.
8388 */
8389static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8390 int nr_pages, struct page *hpage)
8391{
8392 int i, j;
8393
8394 /* check current page array */
8395 for (i = 0; i < nr_pages; i++) {
8396 if (!PageCompound(pages[i]))
8397 continue;
8398 if (compound_head(pages[i]) == hpage)
8399 return true;
8400 }
8401
8402 /* check previously registered pages */
8403 for (i = 0; i < ctx->nr_user_bufs; i++) {
8404 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8405
8406 for (j = 0; j < imu->nr_bvecs; j++) {
8407 if (!PageCompound(imu->bvec[j].bv_page))
8408 continue;
8409 if (compound_head(imu->bvec[j].bv_page) == hpage)
8410 return true;
8411 }
8412 }
8413
8414 return false;
8415}
8416
8417static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8418 int nr_pages, struct io_mapped_ubuf *imu,
8419 struct page **last_hpage)
8420{
8421 int i, ret;
8422
8423 for (i = 0; i < nr_pages; i++) {
8424 if (!PageCompound(pages[i])) {
8425 imu->acct_pages++;
8426 } else {
8427 struct page *hpage;
8428
8429 hpage = compound_head(pages[i]);
8430 if (hpage == *last_hpage)
8431 continue;
8432 *last_hpage = hpage;
8433 if (headpage_already_acct(ctx, pages, i, hpage))
8434 continue;
8435 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8436 }
8437 }
8438
8439 if (!imu->acct_pages)
8440 return 0;
8441
8442 ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
8443 if (ret)
8444 imu->acct_pages = 0;
8445 return ret;
8446}
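
/*
 * Illustrative example, not part of the kernel sources: if a registered
 * buffer sits in a 2 MiB huge page (4 KiB base pages), the first buffer
 * touching it accounts page_size(hpage) >> PAGE_SHIFT == 512 pages in one
 * go; a second buffer in the same huge page accounts nothing, because
 * headpage_already_acct() finds the shared compound head.
 */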
8447
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008448static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8449 struct io_mapped_ubuf *imu,
8450 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008451{
8452 struct vm_area_struct **vmas = NULL;
8453 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008454 unsigned long off, start, end, ubuf;
8455 size_t size;
8456 int ret, pret, nr_pages, i;
8457
8458 ubuf = (unsigned long) iov->iov_base;
8459 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8460 start = ubuf >> PAGE_SHIFT;
8461 nr_pages = end - start;
8462
8463 ret = -ENOMEM;
8464
8465 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8466 if (!pages)
8467 goto done;
8468
8469 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8470 GFP_KERNEL);
8471 if (!vmas)
8472 goto done;
8473
8474 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8475 GFP_KERNEL);
8476 if (!imu->bvec)
8477 goto done;
8478
8479 ret = 0;
8480 mmap_read_lock(current->mm);
8481 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8482 pages, vmas);
8483 if (pret == nr_pages) {
8484 /* don't support file backed memory */
8485 for (i = 0; i < nr_pages; i++) {
8486 struct vm_area_struct *vma = vmas[i];
8487
8488 if (vma->vm_file &&
8489 !is_file_hugepages(vma->vm_file)) {
8490 ret = -EOPNOTSUPP;
8491 break;
8492 }
8493 }
8494 } else {
8495 ret = pret < 0 ? pret : -EFAULT;
8496 }
8497 mmap_read_unlock(current->mm);
8498 if (ret) {
8499 /*
8500		 * if we did a partial map, or found file backed vmas,
8501 * release any pages we did get
8502 */
8503 if (pret > 0)
8504 unpin_user_pages(pages, pret);
8505 kvfree(imu->bvec);
8506 goto done;
8507 }
8508
8509 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8510 if (ret) {
8511 unpin_user_pages(pages, pret);
8512 kvfree(imu->bvec);
8513 goto done;
8514 }
8515
8516 off = ubuf & ~PAGE_MASK;
8517 size = iov->iov_len;
8518 for (i = 0; i < nr_pages; i++) {
8519 size_t vec_len;
8520
8521 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8522 imu->bvec[i].bv_page = pages[i];
8523 imu->bvec[i].bv_len = vec_len;
8524 imu->bvec[i].bv_offset = off;
8525 off = 0;
8526 size -= vec_len;
8527 }
8528 /* store original address for later verification */
8529 imu->ubuf = ubuf;
8530 imu->len = iov->iov_len;
8531 imu->nr_bvecs = nr_pages;
8532 ret = 0;
8533done:
8534 kvfree(pages);
8535 kvfree(vmas);
8536 return ret;
8537}
8538
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008539static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008540{
Jens Axboeedafcce2019-01-09 09:16:05 -07008541 if (ctx->user_bufs)
8542 return -EBUSY;
8543 if (!nr_args || nr_args > UIO_MAXIOV)
8544 return -EINVAL;
8545
8546 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8547 GFP_KERNEL);
8548 if (!ctx->user_bufs)
8549 return -ENOMEM;
8550
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008551 return 0;
8552}
8553
8554static int io_buffer_validate(struct iovec *iov)
8555{
8556 /*
8557 * Don't impose further limits on the size and buffer
8558	 * constraints here; we'll -EINVAL later when IO is
8559 * submitted if they are wrong.
8560 */
8561 if (!iov->iov_base || !iov->iov_len)
8562 return -EFAULT;
8563
8564 /* arbitrary limit, but we need something */
8565 if (iov->iov_len > SZ_1G)
8566 return -EFAULT;
8567
8568 return 0;
8569}
8570
8571static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8572 unsigned int nr_args)
8573{
8574 int i, ret;
8575 struct iovec iov;
8576 struct page *last_hpage = NULL;
8577
8578 ret = io_buffers_map_alloc(ctx, nr_args);
8579 if (ret)
8580 return ret;
8581
Jens Axboeedafcce2019-01-09 09:16:05 -07008582 for (i = 0; i < nr_args; i++) {
8583 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
Jens Axboeedafcce2019-01-09 09:16:05 -07008584
8585 ret = io_copy_iov(ctx, &iov, arg, i);
8586 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008587 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008588
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008589 ret = io_buffer_validate(&iov);
8590 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008591 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008592
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008593 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8594 if (ret)
8595 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008596
8597 ctx->nr_user_bufs++;
8598 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008599
8600 if (ret)
8601 io_sqe_buffers_unregister(ctx);
8602
Jens Axboeedafcce2019-01-09 09:16:05 -07008603 return ret;
8604}
8605
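/*
 * A minimal userspace sketch of driving this path via IORING_REGISTER_BUFFERS,
 * assuming raw syscalls (no liburing) and that __NR_io_uring_register is
 * available from the installed headers; buf and ring_fd are placeholders:
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 64 * 1024);
 *	struct iovec iov = { .iov_base = buf, .iov_len = 64 * 1024 };
 *
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * The registered pages stay pinned (and accounted via io_buffer_account_pin())
 * until IORING_UNREGISTER_BUFFERS or ring teardown.
 */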
Jens Axboe9b402842019-04-11 11:45:41 -06008606static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8607{
8608 __s32 __user *fds = arg;
8609 int fd;
8610
8611 if (ctx->cq_ev_fd)
8612 return -EBUSY;
8613
8614 if (copy_from_user(&fd, fds, sizeof(*fds)))
8615 return -EFAULT;
8616
8617 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8618 if (IS_ERR(ctx->cq_ev_fd)) {
8619 int ret = PTR_ERR(ctx->cq_ev_fd);
8620 ctx->cq_ev_fd = NULL;
8621 return ret;
8622 }
8623
8624 return 0;
8625}
8626
8627static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8628{
8629 if (ctx->cq_ev_fd) {
8630 eventfd_ctx_put(ctx->cq_ev_fd);
8631 ctx->cq_ev_fd = NULL;
8632 return 0;
8633 }
8634
8635 return -ENXIO;
8636}
8637
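/*
 * A small userspace sketch for the eventfd registration above, assuming raw
 * syscalls and an existing ring_fd; nr_args is the count of fds (here 1):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 *
 *	uint64_t cnt;
 *	read(efd, &cnt, sizeof(cnt));
 *
 * The read() returns once CQEs have been posted to the ring;
 * IORING_UNREGISTER_EVENTFD undoes the registration.
 */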
Jens Axboe5a2e7452020-02-23 16:23:11 -07008638static int __io_destroy_buffers(int id, void *p, void *data)
8639{
8640 struct io_ring_ctx *ctx = data;
8641 struct io_buffer *buf = p;
8642
Jens Axboe067524e2020-03-02 16:32:28 -07008643 __io_remove_buffers(ctx, buf, id, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008644 return 0;
8645}
8646
8647static void io_destroy_buffers(struct io_ring_ctx *ctx)
8648{
8649 idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
8650 idr_destroy(&ctx->io_buffer_idr);
8651}
8652
Jens Axboe2b188cc2019-01-07 10:46:33 -07008653static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8654{
Jens Axboe6b063142019-01-10 22:13:58 -07008655 io_finish_async(ctx);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008656 io_sqe_buffers_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008657
8658 if (ctx->sqo_task) {
8659 put_task_struct(ctx->sqo_task);
8660 ctx->sqo_task = NULL;
8661 mmdrop(ctx->mm_account);
8662 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008663 }
Jens Axboedef596e2019-01-09 08:59:42 -07008664
Dennis Zhou91d8f512020-09-16 13:41:05 -07008665#ifdef CONFIG_BLK_CGROUP
8666 if (ctx->sqo_blkcg_css)
8667 css_put(ctx->sqo_blkcg_css);
8668#endif
8669
Jens Axboe6b063142019-01-10 22:13:58 -07008670 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06008671 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008672 io_destroy_buffers(ctx);
Jens Axboe41726c92020-02-23 13:11:42 -07008673 idr_destroy(&ctx->personality_idr);
Jens Axboedef596e2019-01-09 08:59:42 -07008674
Jens Axboe2b188cc2019-01-07 10:46:33 -07008675#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008676 if (ctx->ring_sock) {
8677 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008678 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008679 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008680#endif
8681
Hristo Venev75b28af2019-08-26 17:23:46 +00008682 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008683 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008684
8685 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008686 free_uid(ctx->user);
Jens Axboe181e4482019-11-25 08:52:30 -07008687 put_cred(ctx->creds);
Jens Axboe78076bb2019-12-04 19:56:40 -07008688 kfree(ctx->cancel_hash);
Jens Axboe0ddf92e2019-11-08 08:52:53 -07008689 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008690 kfree(ctx);
8691}
8692
8693static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8694{
8695 struct io_ring_ctx *ctx = file->private_data;
8696 __poll_t mask = 0;
8697
8698 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008699 /*
8700 * synchronizes with barrier from wq_has_sleeper call in
8701 * io_commit_cqring
8702 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008703 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008704 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008705 mask |= EPOLLOUT | EPOLLWRNORM;
Pavel Begunkov6c503152021-01-04 20:36:36 +00008706 io_cqring_overflow_flush(ctx, false, NULL, NULL);
8707 if (io_cqring_events(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008708 mask |= EPOLLIN | EPOLLRDNORM;
8709
8710 return mask;
8711}
8712
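/*
 * Because the ring fd implements ->poll, it can be multiplexed like any other
 * file. A sketch with poll(2), assuming an already set up ring_fd:
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *
 * POLLIN is reported when completions are available in the CQ ring, POLLOUT
 * when there is room to queue new entries in the SQ ring.
 */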
8713static int io_uring_fasync(int fd, struct file *file, int on)
8714{
8715 struct io_ring_ctx *ctx = file->private_data;
8716
8717 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8718}
8719
Yejune Deng0bead8c2020-12-24 11:02:20 +08008720static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008721{
Jens Axboe1e6fa522020-10-15 08:46:24 -06008722 struct io_identity *iod;
Jens Axboe071698e2020-01-28 10:04:42 -07008723
Jens Axboe1e6fa522020-10-15 08:46:24 -06008724 iod = idr_remove(&ctx->personality_idr, id);
8725 if (iod) {
8726 put_cred(iod->creds);
8727 if (refcount_dec_and_test(&iod->count))
8728 kfree(iod);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008729 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008730 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008731
8732 return -EINVAL;
8733}
8734
8735static int io_remove_personalities(int id, void *p, void *data)
8736{
8737 struct io_ring_ctx *ctx = data;
8738
8739 io_unregister_personality(ctx, id);
Jens Axboe071698e2020-01-28 10:04:42 -07008740 return 0;
8741}
8742
Jens Axboe85faa7b2020-04-09 18:14:00 -06008743static void io_ring_exit_work(struct work_struct *work)
8744{
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008745 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8746 exit_work);
Jens Axboe85faa7b2020-04-09 18:14:00 -06008747
Jens Axboe56952e92020-06-17 15:00:04 -06008748 /*
8749 * If we're doing polled IO and end up having requests being
8750 * submitted async (out-of-line), then completions can come in while
8751 * we're waiting for refs to drop. We need to reap these manually,
8752 * as nobody else will be looking for them.
8753 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008754 do {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008755 io_uring_try_cancel_requests(ctx, NULL, NULL);
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008756 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Jens Axboe85faa7b2020-04-09 18:14:00 -06008757 io_ring_ctx_free(ctx);
8758}
8759
Jens Axboe00c18642020-12-20 10:45:02 -07008760static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8761{
8762 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8763
8764 return req->ctx == data;
8765}
8766
Jens Axboe2b188cc2019-01-07 10:46:33 -07008767static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8768{
8769 mutex_lock(&ctx->uring_lock);
8770 percpu_ref_kill(&ctx->refs);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00008771
8772 if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
8773 ctx->sqo_dead = 1;
8774
Pavel Begunkovcda286f2020-12-17 00:24:35 +00008775 /* if force is set, the ring is going away. always drop after that */
8776 ctx->cq_overflow_flushed = 1;
Pavel Begunkov634578f2020-12-06 22:22:44 +00008777 if (ctx->rings)
Pavel Begunkov6c503152021-01-04 20:36:36 +00008778 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
Pavel Begunkov5c766a92021-01-19 13:32:36 +00008779 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008780 mutex_unlock(&ctx->uring_lock);
8781
Pavel Begunkov6b819282020-11-06 13:00:25 +00008782 io_kill_timeouts(ctx, NULL, NULL);
8783 io_poll_remove_all(ctx, NULL, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008784
8785 if (ctx->io_wq)
Jens Axboe00c18642020-12-20 10:45:02 -07008786 io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
Jens Axboe561fb042019-10-24 07:25:42 -06008787
Jens Axboe15dff282019-11-13 09:09:23 -07008788 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008789 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008790
8791 /*
8792 * Do this upfront, so we won't have a grace period where the ring
8793 * is closed but resources aren't reaped yet. This can cause
8794 * spurious failure in setting up a new ring.
8795 */
Jens Axboe760618f2020-07-24 12:53:31 -06008796 io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
8797 ACCT_LOCKED);
Jens Axboe309fc032020-07-10 09:13:34 -06008798
Jens Axboe85faa7b2020-04-09 18:14:00 -06008799 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008800 /*
8801 * Use system_unbound_wq to avoid spawning tons of event kworkers
8802 * if we're exiting a ton of rings at the same time. It just adds
8803	 * noise and overhead, there's no discernible change in runtime
8804 * over using system_wq.
8805 */
8806 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008807}
8808
8809static int io_uring_release(struct inode *inode, struct file *file)
8810{
8811 struct io_ring_ctx *ctx = file->private_data;
8812
8813 file->private_data = NULL;
8814 io_ring_ctx_wait_and_kill(ctx);
8815 return 0;
8816}
8817
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008818struct io_task_cancel {
8819 struct task_struct *task;
8820 struct files_struct *files;
8821};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008822
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008823static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008824{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008825 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008826 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008827 bool ret;
8828
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008829 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008830 unsigned long flags;
8831 struct io_ring_ctx *ctx = req->ctx;
8832
8833 /* protect against races with linked timeouts */
8834 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008835 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008836 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8837 } else {
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008838 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008839 }
8840 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008841}
8842
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008843static void io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008844 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008845 struct files_struct *files)
8846{
8847 struct io_defer_entry *de = NULL;
8848 LIST_HEAD(list);
8849
8850 spin_lock_irq(&ctx->completion_lock);
8851 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00008852 if (io_match_task(de->req, task, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008853 list_cut_position(&list, &ctx->defer_list, &de->list);
8854 break;
8855 }
8856 }
8857 spin_unlock_irq(&ctx->completion_lock);
8858
8859 while (!list_empty(&list)) {
8860 de = list_first_entry(&list, struct io_defer_entry, list);
8861 list_del_init(&de->list);
8862 req_set_fail_links(de->req);
8863 io_put_req(de->req);
8864 io_req_complete(de->req, -ECANCELED);
8865 kfree(de);
8866 }
8867}
8868
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008869static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8870 struct task_struct *task,
8871 struct files_struct *files)
8872{
8873 struct io_task_cancel cancel = { .task = task, .files = files, };
8874
8875 while (1) {
8876 enum io_wq_cancel cret;
8877 bool ret = false;
8878
8879 if (ctx->io_wq) {
8880 cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
8881 &cancel, true);
8882 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8883 }
8884
8885 /* SQPOLL thread does its own polling */
8886 if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
8887 while (!list_empty_careful(&ctx->iopoll_list)) {
8888 io_iopoll_try_reap_events(ctx);
8889 ret = true;
8890 }
8891 }
8892
8893 ret |= io_poll_remove_all(ctx, task, files);
8894 ret |= io_kill_timeouts(ctx, task, files);
8895 ret |= io_run_task_work();
8896 io_cqring_overflow_flush(ctx, true, task, files);
8897 if (!ret)
8898 break;
8899 cond_resched();
8900 }
8901}
8902
Pavel Begunkovca70f002021-01-26 15:28:27 +00008903static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8904 struct task_struct *task,
8905 struct files_struct *files)
8906{
8907 struct io_kiocb *req;
8908 int cnt = 0;
8909
8910 spin_lock_irq(&ctx->inflight_lock);
8911 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8912 cnt += io_match_task(req, task, files);
8913 spin_unlock_irq(&ctx->inflight_lock);
8914 return cnt;
8915}
8916
Pavel Begunkovb52fda02020-11-06 13:00:24 +00008917static void io_uring_cancel_files(struct io_ring_ctx *ctx,
Pavel Begunkovdf9923f2020-11-06 13:00:23 +00008918 struct task_struct *task,
Jens Axboefcb323c2019-10-24 12:39:47 -06008919 struct files_struct *files)
8920{
Jens Axboefcb323c2019-10-24 12:39:47 -06008921 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008922 DEFINE_WAIT(wait);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008923 int inflight;
Jens Axboefcb323c2019-10-24 12:39:47 -06008924
Pavel Begunkovca70f002021-01-26 15:28:27 +00008925 inflight = io_uring_count_inflight(ctx, task, files);
8926 if (!inflight)
Jens Axboefcb323c2019-10-24 12:39:47 -06008927 break;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008928
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008929 io_uring_try_cancel_requests(ctx, task, files);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008930 prepare_to_wait(&task->io_uring->wait, &wait,
8931 TASK_UNINTERRUPTIBLE);
8932 if (inflight == io_uring_count_inflight(ctx, task, files))
8933 schedule();
Pavel Begunkovc98de082020-11-15 12:56:32 +00008934 finish_wait(&task->io_uring->wait, &wait);
Jens Axboefcb323c2019-10-24 12:39:47 -06008935 }
8936}
8937
Pavel Begunkovd9d05212021-01-08 20:57:25 +00008938static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
8939{
Pavel Begunkovd9d05212021-01-08 20:57:25 +00008940 mutex_lock(&ctx->uring_lock);
8941 ctx->sqo_dead = 1;
8942 mutex_unlock(&ctx->uring_lock);
8943
8944 /* make sure callers enter the ring to get error */
Pavel Begunkovb4411612021-01-13 12:42:24 +00008945 if (ctx->rings)
8946 io_ring_set_wakeup_flag(ctx);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00008947}
8948
Jens Axboe0f212202020-09-13 13:09:39 -06008949/*
8950 * We need to iteratively cancel requests, in case a request has dependent
8951 * hard links. These persist even when a cancelation fails, hence keep
8952 * looping until none are found.
8953 */
8954static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8955 struct files_struct *files)
8956{
8957 struct task_struct *task = current;
8958
Jens Axboefdaf0832020-10-30 09:37:30 -06008959 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
Pavel Begunkovd9d05212021-01-08 20:57:25 +00008960 io_disable_sqo_submit(ctx);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008961 task = ctx->sq_data->thread;
Jens Axboefdaf0832020-10-30 09:37:30 -06008962 atomic_inc(&task->io_uring->in_idle);
8963 io_sq_thread_park(ctx->sq_data);
8964 }
Jens Axboe0f212202020-09-13 13:09:39 -06008965
Pavel Begunkovdf9923f2020-11-06 13:00:23 +00008966 io_cancel_defer_files(ctx, task, files);
Jens Axboe0f212202020-09-13 13:09:39 -06008967
Pavel Begunkov3a7efd12021-01-28 23:23:42 +00008968 io_uring_cancel_files(ctx, task, files);
Pavel Begunkovb52fda02020-11-06 13:00:24 +00008969 if (!files)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008970 io_uring_try_cancel_requests(ctx, task, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06008971
8972 if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
8973 atomic_dec(&task->io_uring->in_idle);
8974 /*
8975 * If the files that are going away are the ones in the thread
8976 * identity, clear them out.
8977 */
8978 if (task->io_uring->identity->files == files)
8979 task->io_uring->identity->files = NULL;
8980 io_sq_thread_unpark(ctx->sq_data);
8981 }
Jens Axboe0f212202020-09-13 13:09:39 -06008982}
8983
8984/*
8985 * Note that this task has used io_uring. We use it for cancelation purposes.
8986 */
Jens Axboefdaf0832020-10-30 09:37:30 -06008987static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
Jens Axboe0f212202020-09-13 13:09:39 -06008988{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008989 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkova528b042020-12-21 18:34:04 +00008990 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008991
8992 if (unlikely(!tctx)) {
Jens Axboe0f212202020-09-13 13:09:39 -06008993 ret = io_uring_alloc_task_context(current);
8994 if (unlikely(ret))
8995 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008996 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008997 }
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008998 if (tctx->last != file) {
8999 void *old = xa_load(&tctx->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06009000
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009001 if (!old) {
Jens Axboe0f212202020-09-13 13:09:39 -06009002 get_file(file);
Pavel Begunkova528b042020-12-21 18:34:04 +00009003 ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
9004 file, GFP_KERNEL));
9005 if (ret) {
9006 fput(file);
9007 return ret;
9008 }
Pavel Begunkovecfc8492021-01-25 11:42:20 +00009009
9010 /* one and only SQPOLL file note, held by sqo_task */
9011 WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) &&
9012 current != ctx->sqo_task);
Jens Axboe0f212202020-09-13 13:09:39 -06009013 }
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009014 tctx->last = file;
Jens Axboe0f212202020-09-13 13:09:39 -06009015 }
9016
Jens Axboefdaf0832020-10-30 09:37:30 -06009017 /*
9018 * This is race safe in that the task itself is doing this, hence it
9019 * cannot be going through the exit/cancel paths at the same time.
9020 * This cannot be modified while exit/cancel is running.
9021 */
9022 if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
9023 tctx->sqpoll = true;
9024
Jens Axboe0f212202020-09-13 13:09:39 -06009025 return 0;
9026}
9027
9028/*
9029 * Remove this io_uring_file -> task mapping.
9030 */
9031static void io_uring_del_task_file(struct file *file)
9032{
9033 struct io_uring_task *tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06009034
9035 if (tctx->last == file)
9036 tctx->last = NULL;
Matthew Wilcox (Oracle)5e2ed8c2020-10-09 13:49:53 +01009037 file = xa_erase(&tctx->xa, (unsigned long)file);
Jens Axboe0f212202020-09-13 13:09:39 -06009038 if (file)
9039 fput(file);
9040}
9041
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009042static void io_uring_remove_task_files(struct io_uring_task *tctx)
9043{
9044 struct file *file;
9045 unsigned long index;
9046
9047 xa_for_each(&tctx->xa, index, file)
9048 io_uring_del_task_file(file);
9049}
9050
Jens Axboe0f212202020-09-13 13:09:39 -06009051void __io_uring_files_cancel(struct files_struct *files)
9052{
9053 struct io_uring_task *tctx = current->io_uring;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01009054 struct file *file;
9055 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06009056
9057 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06009058 atomic_inc(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009059 xa_for_each(&tctx->xa, index, file)
9060 io_uring_cancel_task_requests(file->private_data, files);
Jens Axboefdaf0832020-10-30 09:37:30 -06009061 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009062
9063 if (files)
9064 io_uring_remove_task_files(tctx);
Jens Axboefdaf0832020-10-30 09:37:30 -06009065}
9066
9067static s64 tctx_inflight(struct io_uring_task *tctx)
9068{
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009069 return percpu_counter_sum(&tctx->inflight);
9070}
9071
9072static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
9073{
9074 struct io_uring_task *tctx;
Jens Axboefdaf0832020-10-30 09:37:30 -06009075 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009076 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009077
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009078 if (!ctx->sq_data)
9079 return;
9080 tctx = ctx->sq_data->thread->io_uring;
9081 io_disable_sqo_submit(ctx);
Jens Axboefdaf0832020-10-30 09:37:30 -06009082
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009083 atomic_inc(&tctx->in_idle);
9084 do {
9085 /* read completions before cancelations */
9086 inflight = tctx_inflight(tctx);
9087 if (!inflight)
9088 break;
9089 io_uring_cancel_task_requests(ctx, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06009090
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009091 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9092 /*
9093 * If we've seen completions, retry without waiting. This
9094 * avoids a race where a completion comes in before we did
9095 * prepare_to_wait().
9096 */
9097 if (inflight == tctx_inflight(tctx))
9098 schedule();
9099 finish_wait(&tctx->wait, &wait);
9100 } while (1);
9101 atomic_dec(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06009102}
9103
Jens Axboe0f212202020-09-13 13:09:39 -06009104/*
9105 * Find any io_uring fd that this task has registered or done IO on, and cancel
9106 * requests.
9107 */
9108void __io_uring_task_cancel(void)
9109{
9110 struct io_uring_task *tctx = current->io_uring;
9111 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009112 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06009113
9114 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06009115 atomic_inc(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06009116
Pavel Begunkov0b5cd6c2021-01-17 02:29:56 +00009117 /* trigger io_disable_sqo_submit() */
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009118 if (tctx->sqpoll) {
9119 struct file *file;
9120 unsigned long index;
9121
9122 xa_for_each(&tctx->xa, index, file)
9123 io_uring_cancel_sqpoll(file->private_data);
9124 }
Pavel Begunkov0b5cd6c2021-01-17 02:29:56 +00009125
Jens Axboed8a6df12020-10-15 16:24:45 -06009126 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009127 /* read completions before cancelations */
Jens Axboefdaf0832020-10-30 09:37:30 -06009128 inflight = tctx_inflight(tctx);
Jens Axboed8a6df12020-10-15 16:24:45 -06009129 if (!inflight)
9130 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009131 __io_uring_files_cancel(NULL);
9132
9133 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9134
9135 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009136 * If we've seen completions, retry without waiting. This
9137 * avoids a race where a completion comes in before we did
9138 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009139 */
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009140 if (inflight == tctx_inflight(tctx))
9141 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009142 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009143 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06009144
Jens Axboefdaf0832020-10-30 09:37:30 -06009145 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009146
9147 io_uring_remove_task_files(tctx);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009148}
9149
Jens Axboefcb323c2019-10-24 12:39:47 -06009150static int io_uring_flush(struct file *file, void *data)
9151{
Pavel Begunkov6b5733e2021-01-08 20:57:24 +00009152 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009153 struct io_ring_ctx *ctx = file->private_data;
Pavel Begunkov6b5733e2021-01-08 20:57:24 +00009154
Jens Axboe84965ff2021-01-23 15:51:11 -07009155 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
9156 io_uring_cancel_task_requests(ctx, NULL);
9157
Pavel Begunkov6b5733e2021-01-08 20:57:24 +00009158 if (!tctx)
Pavel Begunkov4f793dc2021-01-08 20:57:23 +00009159 return 0;
9160
Pavel Begunkov6b5733e2021-01-08 20:57:24 +00009161 /* we should have cancelled and erased it before PF_EXITING */
9162 WARN_ON_ONCE((current->flags & PF_EXITING) &&
9163 xa_load(&tctx->xa, (unsigned long)file));
9164
Pavel Begunkov4f793dc2021-01-08 20:57:23 +00009165 /*
9166	 * An fput() is pending; the count will be 2 if the only other ref is our
9167	 * potential task file note. If the task is exiting, drop regardless of count.
9168 */
Pavel Begunkov6b5733e2021-01-08 20:57:24 +00009169 if (atomic_long_read(&file->f_count) != 2)
9170 return 0;
Pavel Begunkov4f793dc2021-01-08 20:57:23 +00009171
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009172 if (ctx->flags & IORING_SETUP_SQPOLL) {
9173 /* there is only one file note, which is owned by sqo_task */
Pavel Begunkov4325cb42021-01-16 05:32:30 +00009174 WARN_ON_ONCE(ctx->sqo_task != current &&
9175 xa_load(&tctx->xa, (unsigned long)file));
9176 /* sqo_dead check is for when this happens after cancellation */
9177 WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009178 !xa_load(&tctx->xa, (unsigned long)file));
9179
9180 io_disable_sqo_submit(ctx);
9181 }
9182
9183 if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
9184 io_uring_del_task_file(file);
Jens Axboefcb323c2019-10-24 12:39:47 -06009185 return 0;
9186}
9187
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009188static void *io_uring_validate_mmap_request(struct file *file,
9189 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009190{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009191 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009192 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009193 struct page *page;
9194 void *ptr;
9195
9196 switch (offset) {
9197 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009198 case IORING_OFF_CQ_RING:
9199 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009200 break;
9201 case IORING_OFF_SQES:
9202 ptr = ctx->sq_sqes;
9203 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009204 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009205 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009206 }
9207
9208 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009209 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009210 return ERR_PTR(-EINVAL);
9211
9212 return ptr;
9213}
9214
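/*
 * A userspace sketch of the matching mmap calls, assuming a ring_fd and the
 * io_uring_params 'p' filled in by io_uring_setup(); the sizes follow the
 * offsets the kernel reported back:
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  ring_fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share one mapping, so the
 * larger of sq_sz and cq_sz is mapped once at IORING_OFF_SQ_RING; otherwise
 * the CQ ring needs its own mmap at IORING_OFF_CQ_RING.
 */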
9215#ifdef CONFIG_MMU
9216
9217static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9218{
9219 size_t sz = vma->vm_end - vma->vm_start;
9220 unsigned long pfn;
9221 void *ptr;
9222
9223 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9224 if (IS_ERR(ptr))
9225 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009226
9227 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9228 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9229}
9230
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009231#else /* !CONFIG_MMU */
9232
9233static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9234{
9235 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9236}
9237
9238static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9239{
9240 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9241}
9242
9243static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9244 unsigned long addr, unsigned long len,
9245 unsigned long pgoff, unsigned long flags)
9246{
9247 void *ptr;
9248
9249 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9250 if (IS_ERR(ptr))
9251 return PTR_ERR(ptr);
9252
9253 return (unsigned long) ptr;
9254}
9255
9256#endif /* !CONFIG_MMU */
9257
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009258static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009259{
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009260 int ret = 0;
Jens Axboe90554202020-09-03 12:12:41 -06009261 DEFINE_WAIT(wait);
9262
9263 do {
9264 if (!io_sqring_full(ctx))
9265 break;
9266
9267 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9268
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009269 if (unlikely(ctx->sqo_dead)) {
9270 ret = -EOWNERDEAD;
9271 goto out;
9272 }
9273
Jens Axboe90554202020-09-03 12:12:41 -06009274 if (!io_sqring_full(ctx))
9275 break;
9276
9277 schedule();
9278 } while (!signal_pending(current));
9279
9280 finish_wait(&ctx->sqo_sq_wait, &wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009281out:
9282 return ret;
Jens Axboe90554202020-09-03 12:12:41 -06009283}
9284
Hao Xuc73ebb62020-11-03 10:54:37 +08009285static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9286 struct __kernel_timespec __user **ts,
9287 const sigset_t __user **sig)
9288{
9289 struct io_uring_getevents_arg arg;
9290
9291 /*
9292 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9293 * is just a pointer to the sigset_t.
9294 */
9295 if (!(flags & IORING_ENTER_EXT_ARG)) {
9296 *sig = (const sigset_t __user *) argp;
9297 *ts = NULL;
9298 return 0;
9299 }
9300
9301 /*
9302	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9303	 * timespec and sigset_t pointers if they look valid.
9304 */
9305 if (*argsz != sizeof(arg))
9306 return -EINVAL;
9307 if (copy_from_user(&arg, argp, sizeof(arg)))
9308 return -EFAULT;
9309 *sig = u64_to_user_ptr(arg.sigmask);
9310 *argsz = arg.sigmask_sz;
9311 *ts = u64_to_user_ptr(arg.ts);
9312 return 0;
9313}
9314
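/*
 * A sketch of what userspace passes when IORING_ENTER_EXT_ARG is set, assuming
 * raw syscalls; 'mask' and 'ts' are caller-provided and purely illustrative:
 *
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (unsigned long) &mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (unsigned long) &ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 *
 * Note that argsz must be exactly sizeof(arg), per the check above.
 */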
Jens Axboe2b188cc2019-01-07 10:46:33 -07009315SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009316 u32, min_complete, u32, flags, const void __user *, argp,
9317 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009318{
9319 struct io_ring_ctx *ctx;
9320 long ret = -EBADF;
9321 int submitted = 0;
9322 struct fd f;
9323
Jens Axboe4c6e2772020-07-01 11:29:10 -06009324 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009325
Jens Axboe90554202020-09-03 12:12:41 -06009326 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
Hao Xuc73ebb62020-11-03 10:54:37 +08009327 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009328 return -EINVAL;
9329
9330 f = fdget(fd);
9331 if (!f.file)
9332 return -EBADF;
9333
9334 ret = -EOPNOTSUPP;
9335 if (f.file->f_op != &io_uring_fops)
9336 goto out_fput;
9337
9338 ret = -ENXIO;
9339 ctx = f.file->private_data;
9340 if (!percpu_ref_tryget(&ctx->refs))
9341 goto out_fput;
9342
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009343 ret = -EBADFD;
9344 if (ctx->flags & IORING_SETUP_R_DISABLED)
9345 goto out;
9346
Jens Axboe6c271ce2019-01-10 11:22:30 -07009347 /*
9348 * For SQ polling, the thread will do all submissions and completions.
9349 * Just return the requested submit count, and wake the thread if
9350 * we were asked to.
9351 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009352 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009353 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00009354 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009355
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009356 ret = -EOWNERDEAD;
9357 if (unlikely(ctx->sqo_dead))
9358 goto out;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009359 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009360 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009361 if (flags & IORING_ENTER_SQ_WAIT) {
9362 ret = io_sqpoll_wait_sq(ctx);
9363 if (ret)
9364 goto out;
9365 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009366 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009367 } else if (to_submit) {
Jens Axboefdaf0832020-10-30 09:37:30 -06009368 ret = io_uring_add_task_file(ctx, f.file);
Jens Axboe0f212202020-09-13 13:09:39 -06009369 if (unlikely(ret))
9370 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009371 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009372 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009373 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009374
9375 if (submitted != to_submit)
9376 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009377 }
9378 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009379 const sigset_t __user *sig;
9380 struct __kernel_timespec __user *ts;
9381
9382 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9383 if (unlikely(ret))
9384 goto out;
9385
Jens Axboe2b188cc2019-01-07 10:46:33 -07009386 min_complete = min(min_complete, ctx->cq_entries);
9387
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009388 /*
9389	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9390	 * space applications don't need to poll for IO completion
9391	 * events again; they can rely on io_sq_thread to do the polling
9392	 * work, which reduces CPU usage and uring_lock contention.
9393 */
9394 if (ctx->flags & IORING_SETUP_IOPOLL &&
9395 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009396 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009397 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009398 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009399 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009400 }
9401
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009402out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009403 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009404out_fput:
9405 fdput(f);
9406 return submitted ? submitted : ret;
9407}
9408
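/*
 * The common (non-SQPOLL) call from userspace, as a sketch: submit whatever
 * has been queued in the SQ ring and wait for at least one completion.
 * ring_fd and to_submit are assumed to come from the caller:
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * With IORING_SETUP_SQPOLL the submission count is only reported back, and the
 * call mainly matters for IORING_ENTER_SQ_WAKEUP / IORING_ENTER_SQ_WAIT, as
 * handled in the SQPOLL branch above.
 */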
Tobias Klauserbebdb652020-02-26 18:38:32 +01009409#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009410static int io_uring_show_cred(int id, void *p, void *data)
9411{
Jens Axboe6b47ab82020-11-05 09:50:16 -07009412 struct io_identity *iod = p;
9413 const struct cred *cred = iod->creds;
Jens Axboe87ce9552020-01-30 08:25:34 -07009414 struct seq_file *m = data;
9415 struct user_namespace *uns = seq_user_ns(m);
9416 struct group_info *gi;
9417 kernel_cap_t cap;
9418 unsigned __capi;
9419 int g;
9420
9421 seq_printf(m, "%5d\n", id);
9422 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9423 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9424 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9425 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9426 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9427 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9428 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9429 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9430 seq_puts(m, "\n\tGroups:\t");
9431 gi = cred->group_info;
9432 for (g = 0; g < gi->ngroups; g++) {
9433 seq_put_decimal_ull(m, g ? " " : "",
9434 from_kgid_munged(uns, gi->gid[g]));
9435 }
9436 seq_puts(m, "\n\tCapEff:\t");
9437 cap = cred->cap_effective;
9438 CAP_FOR_EACH_U32(__capi)
9439 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9440 seq_putc(m, '\n');
9441 return 0;
9442}
9443
9444static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9445{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009446 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009447 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009448 int i;
9449
Jens Axboefad8e0d2020-09-28 08:57:48 -06009450 /*
9451 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9452 * since fdinfo case grabs it in the opposite direction of normal use
9453 * cases. If we fail to get the lock, we just don't iterate any
9454 * structures that could be going away outside the io_uring mutex.
9455 */
9456 has_lock = mutex_trylock(&ctx->uring_lock);
9457
Joseph Qidbbe9c62020-09-29 09:01:22 -06009458 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
9459 sq = ctx->sq_data;
9460
9461 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9462 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009463 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009464 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Pavel Begunkovea64ec022021-02-04 13:52:07 +00009465 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009466
Jens Axboe87ce9552020-01-30 08:25:34 -07009467 if (f)
9468 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9469 else
9470 seq_printf(m, "%5u: <none>\n", i);
9471 }
9472 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009473 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009474 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9475
9476 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9477 (unsigned int) buf->len);
9478 }
Jens Axboefad8e0d2020-09-28 08:57:48 -06009479 if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009480 seq_printf(m, "Personalities:\n");
9481 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
9482 }
Jens Axboed7718a92020-02-14 22:23:12 -07009483 seq_printf(m, "PollList:\n");
9484 spin_lock_irq(&ctx->completion_lock);
9485 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9486 struct hlist_head *list = &ctx->cancel_hash[i];
9487 struct io_kiocb *req;
9488
9489 hlist_for_each_entry(req, list, hash_node)
9490 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9491 req->task->task_works != NULL);
9492 }
9493 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009494 if (has_lock)
9495 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009496}
9497
9498static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9499{
9500 struct io_ring_ctx *ctx = f->private_data;
9501
9502 if (percpu_ref_tryget(&ctx->refs)) {
9503 __io_uring_show_fdinfo(ctx, m);
9504 percpu_ref_put(&ctx->refs);
9505 }
9506}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009507#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009508
Jens Axboe2b188cc2019-01-07 10:46:33 -07009509static const struct file_operations io_uring_fops = {
9510 .release = io_uring_release,
Jens Axboefcb323c2019-10-24 12:39:47 -06009511 .flush = io_uring_flush,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009512 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009513#ifndef CONFIG_MMU
9514 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9515 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9516#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009517 .poll = io_uring_poll,
9518 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009519#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009520 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009521#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009522};
9523
9524static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9525 struct io_uring_params *p)
9526{
Hristo Venev75b28af2019-08-26 17:23:46 +00009527 struct io_rings *rings;
9528 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009529
Jens Axboebd740482020-08-05 12:58:23 -06009530 /* make sure these are sane, as we already accounted them */
9531 ctx->sq_entries = p->sq_entries;
9532 ctx->cq_entries = p->cq_entries;
9533
Hristo Venev75b28af2019-08-26 17:23:46 +00009534 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9535 if (size == SIZE_MAX)
9536 return -EOVERFLOW;
9537
9538 rings = io_mem_alloc(size);
9539 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009540 return -ENOMEM;
9541
Hristo Venev75b28af2019-08-26 17:23:46 +00009542 ctx->rings = rings;
9543 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9544 rings->sq_ring_mask = p->sq_entries - 1;
9545 rings->cq_ring_mask = p->cq_entries - 1;
9546 rings->sq_ring_entries = p->sq_entries;
9547 rings->cq_ring_entries = p->cq_entries;
9548 ctx->sq_mask = rings->sq_ring_mask;
9549 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009550
9551 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009552 if (size == SIZE_MAX) {
9553 io_mem_free(ctx->rings);
9554 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009555 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009556 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009557
9558 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009559 if (!ctx->sq_sqes) {
9560 io_mem_free(ctx->rings);
9561 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009562 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009563 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009564
Jens Axboe2b188cc2019-01-07 10:46:33 -07009565 return 0;
9566}
9567
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009568static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9569{
9570 int ret, fd;
9571
9572 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9573 if (fd < 0)
9574 return fd;
9575
9576 ret = io_uring_add_task_file(ctx, file);
9577 if (ret) {
9578 put_unused_fd(fd);
9579 return ret;
9580 }
9581 fd_install(fd, file);
9582 return fd;
9583}
9584
Jens Axboe2b188cc2019-01-07 10:46:33 -07009585/*
9586 * Allocate an anonymous fd, this is what constitutes the application
9587 * visible backing of an io_uring instance. The application mmaps this
9588 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9589 * we have to tie this fd to a socket for file garbage collection purposes.
9590 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009591static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009592{
9593 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009594#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009595 int ret;
9596
Jens Axboe2b188cc2019-01-07 10:46:33 -07009597 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9598 &ctx->ring_sock);
9599 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009600 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009601#endif
9602
Jens Axboe2b188cc2019-01-07 10:46:33 -07009603 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9604 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009605#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009606 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009607 sock_release(ctx->ring_sock);
9608 ctx->ring_sock = NULL;
9609 } else {
9610 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009611 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009612#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009613 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009614}
9615
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009616static int io_uring_create(unsigned entries, struct io_uring_params *p,
9617 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009618{
9619 struct user_struct *user = NULL;
9620 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009621 struct file *file;
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009622 bool limit_mem;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009623 int ret;
9624
Jens Axboe8110c1a2019-12-28 15:39:54 -07009625 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009626 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009627 if (entries > IORING_MAX_ENTRIES) {
9628 if (!(p->flags & IORING_SETUP_CLAMP))
9629 return -EINVAL;
9630 entries = IORING_MAX_ENTRIES;
9631 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009632
9633 /*
9634 * Use twice as many entries for the CQ ring. It's possible for the
9635 * application to drive a higher depth than the size of the SQ ring,
9636 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009637 * some flexibility in overcommitting a bit. If the application has
9638 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9639 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009640 */
9641 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009642 if (p->flags & IORING_SETUP_CQSIZE) {
9643 /*
9644 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9645 * to a power-of-two, if it isn't already. We do NOT impose
9646 * any cq vs sq ring sizing.
9647 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009648 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009649 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009650 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9651 if (!(p->flags & IORING_SETUP_CLAMP))
9652 return -EINVAL;
9653 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9654 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009655 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9656 if (p->cq_entries < p->sq_entries)
9657 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009658 } else {
9659 p->cq_entries = 2 * p->sq_entries;
9660 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009661
9662 user = get_uid(current_user());
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009663 limit_mem = !capable(CAP_IPC_LOCK);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009664
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009665 if (limit_mem) {
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009666 ret = __io_account_mem(user,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009667 ring_pages(p->sq_entries, p->cq_entries));
9668 if (ret) {
9669 free_uid(user);
9670 return ret;
9671 }
9672 }
9673
9674 ctx = io_ring_ctx_alloc(p);
9675 if (!ctx) {
Bijan Mottahedehaad5d8d2020-06-16 16:36:08 -07009676 if (limit_mem)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009677 __io_unaccount_mem(user, ring_pages(p->sq_entries,
Jens Axboe2b188cc2019-01-07 10:46:33 -07009678 p->cq_entries));
9679 free_uid(user);
9680 return -ENOMEM;
9681 }
9682 ctx->compat = in_compat_syscall();
Jens Axboe2b188cc2019-01-07 10:46:33 -07009683 ctx->user = user;
Jens Axboe0b8c0ec2019-12-02 08:50:00 -07009684 ctx->creds = get_current_cred();
Jens Axboe4ea33a92020-10-15 13:46:44 -06009685#ifdef CONFIG_AUDIT
9686 ctx->loginuid = current->loginuid;
9687 ctx->sessionid = current->sessionid;
9688#endif
Jens Axboe2aede0e2020-09-14 10:45:53 -06009689 ctx->sqo_task = get_task_struct(current);
9690
9691 /*
9692 * This is just grabbed for accounting purposes. When a process exits,
9693 * the mm is exited and dropped before the files, hence we need to hang
9694 * on to this mm purely for the purposes of being able to unaccount
9695 * memory (locked/pinned vm). It's not used for anything else.
9696 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009697 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009698 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009699
Dennis Zhou91d8f512020-09-16 13:41:05 -07009700#ifdef CONFIG_BLK_CGROUP
9701 /*
9702 * The sq thread will belong to the original cgroup it was inited in.
9703 * If the cgroup goes offline (e.g. disabling the io controller), then
9704 * issued bios will be associated with the closest cgroup later in the
9705 * block layer.
9706 */
9707 rcu_read_lock();
9708 ctx->sqo_blkcg_css = blkcg_css();
9709 ret = css_tryget_online(ctx->sqo_blkcg_css);
9710 rcu_read_unlock();
9711 if (!ret) {
9712 /* don't init against a dying cgroup, have the user try again */
9713 ctx->sqo_blkcg_css = NULL;
9714 ret = -ENODEV;
9715 goto err;
9716 }
9717#endif
Jens Axboe6c271ce2019-01-10 11:22:30 -07009718
Jens Axboe2b188cc2019-01-07 10:46:33 -07009719 /*
9720 * Account memory _before_ installing the file descriptor. Once
9721 * the descriptor is installed, it can get closed at any time. Also
Jens Axboe2b188cc2019-01-07 10:46:33 -07009722 * do this before hitting the general error path, as ring freeing
Hristo Venev75b28af2019-08-26 17:23:46 +00009723 * will un-account as well.
9724 */
9725 io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
9726 ACCT_LOCKED);
9727 ctx->limit_mem = limit_mem;
9728
9729 ret = io_allocate_scq_urings(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009730 if (ret)
9731 goto err;
Hristo Venev75b28af2019-08-26 17:23:46 +00009732
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009733 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009734 if (ret)
9735 goto err;
9736
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009737 if (!(p->flags & IORING_SETUP_R_DISABLED))
9738 io_sq_offload_start(ctx);
9739
Jens Axboe2b188cc2019-01-07 10:46:33 -07009740 memset(&p->sq_off, 0, sizeof(p->sq_off));
9741 p->sq_off.head = offsetof(struct io_rings, sq.head);
9742 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9743 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9744 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9745 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9746 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9747 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
9748
9749 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009750 p->cq_off.head = offsetof(struct io_rings, cq.head);
9751 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9752 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9753 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9754 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9755 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009756 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009757
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009758 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9759 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009760 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009761 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
9762 IORING_FEAT_EXT_ARG;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009763
9764 if (copy_to_user(params, p, sizeof(*p))) {
9765 ret = -EFAULT;
9766 goto err;
9767 }
Jens Axboed1719f72020-07-30 13:43:53 -06009768
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009769 file = io_uring_get_file(ctx);
9770 if (IS_ERR(file)) {
9771 ret = PTR_ERR(file);
9772 goto err;
9773 }
9774
Jens Axboed1719f72020-07-30 13:43:53 -06009775 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009776 * Install ring fd as the very last thing, so we don't risk someone
9777 * having closed it before we finish setup
9778 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009779 ret = io_uring_install_fd(ctx, file);
9780 if (ret < 0) {
Pavel Begunkov06585c42021-01-13 12:42:25 +00009781 io_disable_sqo_submit(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009782 /* fput will clean it up */
9783 fput(file);
9784 return ret;
9785 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009786
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009787 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009788 return ret;
9789err:
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009790 io_disable_sqo_submit(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009791 io_ring_ctx_wait_and_kill(ctx);
9792 return ret;
9793}
9794
9795/*
9796 * Sets up an io_uring context and returns the fd. The application asks for a
9797 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9798 * params structure passed in.
9799 */
9800static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9801{
9802 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009803 int i;
9804
9805 if (copy_from_user(&p, params, sizeof(p)))
9806 return -EFAULT;
9807 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9808 if (p.resv[i])
9809 return -EINVAL;
9810 }
9811
Jens Axboe6c271ce2019-01-10 11:22:30 -07009812 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009813 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009814 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9815 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009816 return -EINVAL;
9817
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009818 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009819}
9820
9821SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9822 struct io_uring_params __user *, params)
9823{
9824 return io_uring_setup(entries, params);
9825}
9826
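/*
 * A setup sketch from the application side, assuming raw syscalls; the flags
 * shown exercise the CQSIZE/CLAMP handling in io_uring_create() above:
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
 *	p.cq_entries = 4096;
 *
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 * On return, p.sq_entries and p.cq_entries hold the rounded-up sizes actually
 * allocated, and p.sq_off/p.cq_off describe the ring layout for mmap.
 */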
Jens Axboe66f4af92020-01-16 15:36:52 -07009827static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9828{
9829 struct io_uring_probe *p;
9830 size_t size;
9831 int i, ret;
9832
9833 size = struct_size(p, ops, nr_args);
9834 if (size == SIZE_MAX)
9835 return -EOVERFLOW;
9836 p = kzalloc(size, GFP_KERNEL);
9837 if (!p)
9838 return -ENOMEM;
9839
9840 ret = -EFAULT;
9841 if (copy_from_user(p, arg, size))
9842 goto out;
9843 ret = -EINVAL;
9844 if (memchr_inv(p, 0, size))
9845 goto out;
9846
9847 p->last_op = IORING_OP_LAST - 1;
9848 if (nr_args > IORING_OP_LAST)
9849 nr_args = IORING_OP_LAST;
9850
9851 for (i = 0; i < nr_args; i++) {
9852 p->ops[i].op = i;
9853 if (!io_op_defs[i].not_supported)
9854 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9855 }
9856 p->ops_len = i;
9857
9858 ret = 0;
9859 if (copy_to_user(arg, p, size))
9860 ret = -EFAULT;
9861out:
9862 kfree(p);
9863 return ret;
9864}
9865
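/*
 * A probe sketch from userspace, assuming raw syscalls; the opcode table is
 * sized for IORING_OP_LAST entries so everything the kernel knows about fits:
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *
 *	if (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED)
 *		; // opcode is usable on this kernel
 *
 * Userspace built against newer headers may pass more entries than this kernel
 * implements; nr_args is clamped to IORING_OP_LAST above.
 */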
Jens Axboe071698e2020-01-28 10:04:42 -07009866static int io_register_personality(struct io_ring_ctx *ctx)
9867{
Jens Axboe1e6fa522020-10-15 08:46:24 -06009868 struct io_identity *id;
9869 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009870
Jens Axboe1e6fa522020-10-15 08:46:24 -06009871 id = kmalloc(sizeof(*id), GFP_KERNEL);
9872 if (unlikely(!id))
9873 return -ENOMEM;
9874
9875 io_init_identity(id);
9876 id->creds = get_current_cred();
9877
9878 ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
9879 if (ret < 0) {
9880 put_cred(id->creds);
9881 kfree(id);
9882 }
9883 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009884}
9885
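/*
 * Userspace side, as a sketch: register the current credentials and get back
 * an id that individual SQEs can later reference. Raw syscalls assumed:
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *
 * A request then opts in by setting sqe->personality to that id;
 * IORING_UNREGISTER_PERSONALITY drops it again.
 */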
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009886static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9887 unsigned int nr_args)
9888{
9889 struct io_uring_restriction *res;
9890 size_t size;
9891 int i, ret;
9892
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009893 /* Restrictions allowed only if rings started disabled */
9894 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9895 return -EBADFD;
9896
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009897 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009898 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009899 return -EBUSY;
9900
9901 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9902 return -EINVAL;
9903
9904 size = array_size(nr_args, sizeof(*res));
9905 if (size == SIZE_MAX)
9906 return -EOVERFLOW;
9907
9908 res = memdup_user(arg, size);
9909 if (IS_ERR(res))
9910 return PTR_ERR(res);
9911
9912 ret = 0;
9913
9914 for (i = 0; i < nr_args; i++) {
9915 switch (res[i].opcode) {
9916 case IORING_RESTRICTION_REGISTER_OP:
9917 if (res[i].register_op >= IORING_REGISTER_LAST) {
9918 ret = -EINVAL;
9919 goto out;
9920 }
9921
9922 __set_bit(res[i].register_op,
9923 ctx->restrictions.register_op);
9924 break;
9925 case IORING_RESTRICTION_SQE_OP:
9926 if (res[i].sqe_op >= IORING_OP_LAST) {
9927 ret = -EINVAL;
9928 goto out;
9929 }
9930
9931 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9932 break;
9933 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9934 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9935 break;
9936 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9937 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9938 break;
9939 default:
9940 ret = -EINVAL;
9941 goto out;
9942 }
9943 }
9944
9945out:
 9946	/* Reset all restrictions if an error occurred */
9947 if (ret != 0)
9948 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9949 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009950 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009951
9952 kfree(res);
9953 return ret;
9954}
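
/*
 * A hedged userspace sketch of the restriction flow (the helper name and the
 * chosen ops are illustrative only): the ring must have been created with
 * IORING_SETUP_R_DISABLED, restrictions are registered exactly once, and the
 * whitelist only takes effect once the ring is enabled:
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int restrict_to_readv(int ring_fd)
 *	{
 *		struct io_uring_restriction res[2] = {
 *			{ .opcode = IORING_RESTRICTION_SQE_OP,
 *			  .sqe_op = IORING_OP_READV },
 *			{ .opcode = IORING_RESTRICTION_REGISTER_OP,
 *			  .register_op = IORING_REGISTER_BUFFERS },
 *		};
 *
 *		if (syscall(__NR_io_uring_register, ring_fd,
 *			    IORING_REGISTER_RESTRICTIONS, res, 2))
 *			return -1;
 *		return syscall(__NR_io_uring_register, ring_fd,
 *			       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *	}
 */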
9955
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009956static int io_register_enable_rings(struct io_ring_ctx *ctx)
9957{
9958 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9959 return -EBADFD;
9960
9961 if (ctx->restrictions.registered)
9962 ctx->restricted = 1;
9963
9964 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9965
9966 io_sq_offload_start(ctx);
9967
9968 return 0;
9969}
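
/*
 * Typical sequence for a sandboxed ring: io_uring_setup() with
 * IORING_SETUP_R_DISABLED, IORING_REGISTER_RESTRICTIONS while the ring is
 * still disabled (sketched above), then IORING_REGISTER_ENABLE_RINGS to
 * start SQPOLL (if configured) and begin accepting submissions.
 */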
9970
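/*
 * Return true if @op requires the ctx to be quiesced (percpu refs killed and
 * all in-flight requests drained) before the registration is applied.
 */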
Jens Axboe071698e2020-01-28 10:04:42 -07009971static bool io_register_op_must_quiesce(int op)
9972{
9973 switch (op) {
9974 case IORING_UNREGISTER_FILES:
9975 case IORING_REGISTER_FILES_UPDATE:
9976 case IORING_REGISTER_PROBE:
9977 case IORING_REGISTER_PERSONALITY:
9978 case IORING_UNREGISTER_PERSONALITY:
9979 return false;
9980 default:
9981 return true;
9982 }
9983}
9984
Jens Axboeedafcce2019-01-09 09:16:05 -07009985static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9986 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06009987 __releases(ctx->uring_lock)
9988 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07009989{
9990 int ret;
9991
Jens Axboe35fa71a2019-04-22 10:23:23 -06009992 /*
 9993	 * We're inside the ring mutex; if the ref is already dying, then
9994 * someone else killed the ctx or is already going through
9995 * io_uring_register().
9996 */
9997 if (percpu_ref_is_dying(&ctx->refs))
9998 return -ENXIO;
9999
Jens Axboe071698e2020-01-28 10:04:42 -070010000 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010001 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -060010002
Jens Axboe05f3fb32019-12-09 11:22:50 -070010003 /*
10004 * Drop uring mutex before waiting for references to exit. If
10005 * another thread is currently inside io_uring_enter() it might
10006 * need to grab the uring_lock to make progress. If we hold it
10007 * here across the drain wait, then we can deadlock. It's safe
10008 * to drop the mutex here, since no new references will come in
10009 * after we've killed the percpu ref.
10010 */
10011 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010012 do {
10013 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10014 if (!ret)
10015 break;
Jens Axboeed6930c2020-10-08 19:09:46 -060010016 ret = io_run_task_work_sig();
10017 if (ret < 0)
10018 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010019 } while (1);
10020
Jens Axboe05f3fb32019-12-09 11:22:50 -070010021 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010022
Jens Axboec1503682020-01-08 08:26:07 -070010023 if (ret) {
10024 percpu_ref_resurrect(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010025 goto out_quiesce;
10026 }
10027 }
10028
10029 if (ctx->restricted) {
10030 if (opcode >= IORING_REGISTER_LAST) {
10031 ret = -EINVAL;
10032 goto out;
10033 }
10034
10035 if (!test_bit(opcode, ctx->restrictions.register_op)) {
10036 ret = -EACCES;
Jens Axboec1503682020-01-08 08:26:07 -070010037 goto out;
10038 }
Jens Axboe05f3fb32019-12-09 11:22:50 -070010039 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010040
10041 switch (opcode) {
10042 case IORING_REGISTER_BUFFERS:
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010043 ret = io_sqe_buffers_register(ctx, arg, nr_args);
Jens Axboeedafcce2019-01-09 09:16:05 -070010044 break;
10045 case IORING_UNREGISTER_BUFFERS:
10046 ret = -EINVAL;
10047 if (arg || nr_args)
10048 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010049 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -070010050 break;
Jens Axboe6b063142019-01-10 22:13:58 -070010051 case IORING_REGISTER_FILES:
10052 ret = io_sqe_files_register(ctx, arg, nr_args);
10053 break;
10054 case IORING_UNREGISTER_FILES:
10055 ret = -EINVAL;
10056 if (arg || nr_args)
10057 break;
10058 ret = io_sqe_files_unregister(ctx);
10059 break;
Jens Axboec3a31e62019-10-03 13:59:56 -060010060 case IORING_REGISTER_FILES_UPDATE:
10061 ret = io_sqe_files_update(ctx, arg, nr_args);
10062 break;
Jens Axboe9b402842019-04-11 11:45:41 -060010063 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -070010064 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -060010065 ret = -EINVAL;
10066 if (nr_args != 1)
10067 break;
10068 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -070010069 if (ret)
10070 break;
10071 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10072 ctx->eventfd_async = 1;
10073 else
10074 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -060010075 break;
10076 case IORING_UNREGISTER_EVENTFD:
10077 ret = -EINVAL;
10078 if (arg || nr_args)
10079 break;
10080 ret = io_eventfd_unregister(ctx);
10081 break;
Jens Axboe66f4af92020-01-16 15:36:52 -070010082 case IORING_REGISTER_PROBE:
10083 ret = -EINVAL;
10084 if (!arg || nr_args > 256)
10085 break;
10086 ret = io_probe(ctx, arg, nr_args);
10087 break;
Jens Axboe071698e2020-01-28 10:04:42 -070010088 case IORING_REGISTER_PERSONALITY:
10089 ret = -EINVAL;
10090 if (arg || nr_args)
10091 break;
10092 ret = io_register_personality(ctx);
10093 break;
10094 case IORING_UNREGISTER_PERSONALITY:
10095 ret = -EINVAL;
10096 if (arg)
10097 break;
10098 ret = io_unregister_personality(ctx, nr_args);
10099 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010100 case IORING_REGISTER_ENABLE_RINGS:
10101 ret = -EINVAL;
10102 if (arg || nr_args)
10103 break;
10104 ret = io_register_enable_rings(ctx);
10105 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010106 case IORING_REGISTER_RESTRICTIONS:
10107 ret = io_register_restrictions(ctx, arg, nr_args);
10108 break;
Jens Axboeedafcce2019-01-09 09:16:05 -070010109 default:
10110 ret = -EINVAL;
10111 break;
10112 }
10113
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010114out:
Jens Axboe071698e2020-01-28 10:04:42 -070010115 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010116 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -070010117 percpu_ref_reinit(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010118out_quiesce:
Jens Axboe0f158b42020-05-14 17:18:39 -060010119 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010120 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010121 return ret;
10122}
10123
10124SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10125 void __user *, arg, unsigned int, nr_args)
10126{
10127 struct io_ring_ctx *ctx;
10128 long ret = -EBADF;
10129 struct fd f;
10130
10131 f = fdget(fd);
10132 if (!f.file)
10133 return -EBADF;
10134
10135 ret = -EOPNOTSUPP;
10136 if (f.file->f_op != &io_uring_fops)
10137 goto out_fput;
10138
10139 ctx = f.file->private_data;
10140
10141 mutex_lock(&ctx->uring_lock);
10142 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10143 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010144 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10145 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010146out_fput:
10147 fdput(f);
10148 return ret;
10149}
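
/*
 * For reference, a minimal sketch of the userspace side of this syscall,
 * registering a single fixed buffer (the helper name is illustrative;
 * liburing's io_uring_register_buffers() is the supported interface):
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	static int register_one_buffer(int ring_fd, void *buf, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *		return syscall(__NR_io_uring_register, ring_fd,
 *			       IORING_REGISTER_BUFFERS, &iov, 1);
 *	}
 *
 * Requests can then reference the buffer by index with
 * IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED and sqe->buf_index.
 */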
10150
Jens Axboe2b188cc2019-01-07 10:46:33 -070010151static int __init io_uring_init(void)
10152{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010153#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10154 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10155 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10156} while (0)
10157
10158#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10159 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10160 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10161 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10162 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10163 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10164 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10165 BUILD_BUG_SQE_ELEM(8, __u64, off);
10166 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10167 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010168 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010169 BUILD_BUG_SQE_ELEM(24, __u32, len);
10170 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10171 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10172 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10173 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010174 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10175 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010176 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10177 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10178 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10179 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10180 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10181 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10182 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10183 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010184 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010185 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10186 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
10187 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010188 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010189
Jens Axboed3656342019-12-18 09:50:26 -070010190 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -070010191 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Jens Axboe2b188cc2019-01-07 10:46:33 -070010192 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
10193 return 0;
10194};
10195__initcall(io_uring_init);