// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
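/*
 * For illustration only, not part of the kernel proper: a minimal
 * userspace sketch of reaping one CQE under the ordering rules above.
 * The my_cq layout and its khead/ktail/kring_mask/cqes pointers are
 * assumed to have been wired up from the io_uring_setup() mmap offsets
 * (liburing does the equivalent internally):
 *
 *	static int reap_cqe(struct my_cq *cq, struct io_uring_cqe *out)
 *	{
 *		unsigned head = *cq->khead;
 *
 *		// the acquire load pairs with the kernel's tail store
 *		if (head == smp_load_acquire(cq->ktail))
 *			return -EAGAIN;		// ring is empty
 *		*out = cq->cqes[head & *cq->kring_mask];
 *		// the release store orders the entry load before the head store
 *		smp_store_release(cq->khead, head + 1);
 *		return 0;
 *	}
 */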
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
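/*
 * Illustrative only: with the two-level fixed file table defined further
 * down (where "data" is a hypothetical struct fixed_rsrc_data *), a fixed
 * file at index i resolves as
 *
 *	struct fixed_rsrc_table *tbl = &data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	struct file *file = tbl->files[i & IORING_FILE_TABLE_MASK];
 *
 * e.g. index 1000 lands in table 1, slot 488 (1000 - 512).
 */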
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
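/*
 * For illustration only: how an application would typically map this
 * structure once io_uring_setup() has filled in a struct io_uring_params
 * p (error handling omitted; see liburing for the real thing):
 *
 *	size_t sz = p.sq_off.array + p.sq_entries * sizeof(u32);
 *	void *sq_ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq_ptr + p.sq_off.tail;
 *	u32 *sq_array = sq_ptr + p.sq_off.array;
 */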

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct		bio_vec *bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct io_ring_ctx;

struct io_rsrc_put {
	struct list_head list;
	union {
		void *rsrc;
		struct file *file;
	};
};

struct fixed_rsrc_table {
	struct file		**files;
};

struct fixed_rsrc_ref_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct fixed_rsrc_data		*rsrc_data;
	void				(*rsrc_put)(struct io_ring_ctx *ctx,
						    struct io_rsrc_put *prsrc);
	struct llist_node		llist;
	bool				done;
};

struct fixed_rsrc_data {
	struct fixed_rsrc_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
	struct callback_head	*park_task_work;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	unsigned int		locked_free_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct io_submit_state		submit_state;

	struct io_rings	*rings;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_rsrc_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ref_comp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct xarray		io_buffers;

	struct xarray		personalities;
	u32			pers_next;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct io_restriction		restrictions;

	/* exit task_work */
	struct callback_head		*exit_task_work;

	struct wait_queue_head		hash_wait;

	/* Keep this last, we don't need it for the fast path */
	struct work_struct		exit_work;
	struct list_head		tctx_list;
};
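/*
 * Illustrative only: consuming one submission via the sq_array
 * indirection documented in the struct above, in the style of the
 * submission-path helpers later in the full file (dropped entries are
 * what rings->sq_dropped counts):
 *
 *	unsigned head = ctx->cached_sq_head++ & ctx->sq_mask;
 *	u32 idx = READ_ONCE(ctx->sq_array[head]);
 *
 *	if (likely(idx < ctx->sq_entries))
 *		sqe = &ctx->sq_sqes[idx];
 *	else
 *		ctx->cached_sq_dropped++;	// invalid index from the app
 */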

struct io_uring_task {
	/* submission side */
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		in_idle;
	bool			sqpoll;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

struct io_task_work {
	struct io_wq_work_node	node;
	task_work_func_t	func;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;
	union {
		struct io_task_work	io_task_work;
		struct callback_head	task_work;
	};
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_task_file(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files);
static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx);
static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

static bool io_rw_reissue(struct io_kiocb *req);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update *ip,
				 unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);
static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
		__io_clean_op(req);
}

static inline void io_set_resource_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->file_data->node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static bool io_match_task(struct io_kiocb *head,
			  struct task_struct *task,
			  struct files_struct *files)
{
	struct io_kiocb *req;

	if (task && head->task != task) {
		/* in terms of cancelation, always match if req task is dead */
		if (head->task->flags & PF_EXITING)
			return true;
		return false;
	}
	if (!files)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
				   GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);
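	/*
	 * Illustrative arithmetic for the sizing above: for cq_entries of
	 * 256, ilog2(256) == 8, so hash_bits == 3 and we get 8 buckets
	 * (256 / 8 == 32 entries per list when full); at the
	 * IORING_MAX_CQ_ENTRIES limit of 65536, hash_bits == 11, i.e.
	 * 2048 buckets, again ~32 entries per list.
	 */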
1145
Roman Gushchin21482892019-05-07 10:01:48 -07001146 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001147 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1148 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001149
1150 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001151 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001152 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001153 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001154 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001155 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001156 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001157 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001158 mutex_init(&ctx->uring_lock);
1159 init_waitqueue_head(&ctx->wait);
1160 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001161 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001162 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001163 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -06001164 spin_lock_init(&ctx->inflight_lock);
1165 INIT_LIST_HEAD(&ctx->inflight_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001166 spin_lock_init(&ctx->rsrc_ref_lock);
1167 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001168 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1169 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001170 INIT_LIST_HEAD(&ctx->tctx_list);
Jens Axboe1b4c3512021-02-10 00:03:19 +00001171 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001172 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001173 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001174err:
Jens Axboe78076bb2019-12-04 19:56:40 -07001175 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001176 kfree(ctx);
1177 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001178}
1179
static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq != ctx->cached_cq_tail
				+ READ_ONCE(ctx->cached_cq_overflow);
	}

	return false;
}

static void io_req_track_inflight(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;

		spin_lock_irq(&ctx->inflight_lock);
		list_add(&req->inflight_entry, &ctx->inflight_list);
		spin_unlock_irq(&ctx->inflight_lock);
	}
}

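/*
 * Set up ->work before a request is punted to io-wq: pin the submitter's
 * credentials, and decide whether the work must hash (serialise) on the
 * target inode or may run on an unbound worker.
 */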
static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->work.creds)
		req->work.creds = get_current_cred();

	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	io_for_each_link(cur, req)
		io_prep_async_work(cur);
}

static void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);
	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

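/*
 * Cancel a pending timeout. If hrtimer_try_to_cancel() reports the timer
 * wasn't already running, post a CQE with 'status' and drop the reference
 * the timeout still held.
 */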
static void io_kill_timeout(struct io_kiocb *req, int status)
{
	struct io_timeout_data *io = req->async_data;
	int ret;

	ret = hrtimer_try_to_cancel(&io->timer);
	if (ret != -1) {
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&req->timeout.list);
		io_cqring_fill_event(req, status);
		io_put_req_deferred(req, 1);
	}
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;

	if (list_empty(&ctx->timeout_list))
		return;

	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	do {
		u32 events_needed, events_got;
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req, 0);
	} while (!list_empty(&ctx->timeout_list));

	ctx->cq_last_tm_flush = seq;
}

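/*
 * Publish all CQEs filled so far: expire any timeouts the new CQ sequence
 * satisfies, make the updated tail visible to userspace with a release
 * store, and kick any drained requests that have become runnable.
 */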
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);

	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);

	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

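/*
 * Reserve the next CQE slot, or return NULL if the CQ ring is full. The
 * cached tail is only made visible to userspace later, in
 * io_commit_cqring().
 */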
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (__io_cqring_events(ctx) == rings->cq_ring_entries)
		return NULL;

	tail = ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

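/*
 * Wake up everyone that may be waiting on completions: ring waiters, the
 * SQPOLL thread, an attached eventfd, and poll/fasync watchers.
 */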
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	/* see waitqueue_active() comment */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	/* see waitqueue_active() comment */
	smp_mb();

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->wait))
			wake_up(&ctx->wait);
	}
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				       struct task_struct *tsk,
				       struct files_struct *files)
{
	struct io_rings *rings = ctx->rings;
	struct io_kiocb *req, *tmp;
	struct io_uring_cqe *cqe;
	unsigned long flags;
	bool all_flushed, posted;
	LIST_HEAD(list);

	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
		return false;

	posted = false;
	spin_lock_irqsave(&ctx->completion_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
		if (!io_match_task(req, tsk, files))
			continue;

		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		list_move(&req->compl.list, &list);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->compl.cflags);
		} else {
			ctx->cached_cq_overflow++;
			WRITE_ONCE(ctx->rings->cq_overflow,
				   ctx->cached_cq_overflow);
		}
		posted = true;
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);
	if (all_flushed) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
	}

	if (posted)
		io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	if (posted)
		io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, compl.list);
		list_del(&req->compl.list);
		io_put_req(req);
	}

	return all_flushed;
}

static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				     struct task_struct *tsk,
				     struct files_struct *files)
{
	bool ret = true;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/* iopoll syncs against uring_lock, not completion_lock */
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&ctx->uring_lock);
		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&ctx->uring_lock);
	}

	return ret;
}

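/*
 * Fill a CQE for 'req'. If the CQ ring is full, the completion is either
 * dropped (accounted in the overflow counter) when flushing is disabled or
 * the task is going idle, or stashed on ->cq_overflow_list to be flushed
 * into the ring later.
 */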
static void __io_cqring_fill_event(struct io_kiocb *req, long res,
				   unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed ||
		   atomic_read(&req->task->io_uring->in_idle)) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * then we cannot store the request for later flushing, we need
		 * to drop it on the floor.
		 */
		ctx->cached_cq_overflow++;
		WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
			ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
		}
		io_clean_op(req);
		req->result = res;
		req->compl.cflags = cflags;
		refcount_inc(&req->refs);
		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

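/*
 * Post a completion, potentially from IRQ context. If this put drops the
 * last reference, the request is torn down under completion_lock and parked
 * on the locked free list, so the submission path can recycle it without
 * another trip through the allocator.
 */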
static void io_req_complete_post(struct io_kiocb *req, long res,
				 unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (refcount_dec_and_test(&req->refs)) {
		struct io_comp_state *cs = &ctx->submit_state.comp;

		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_dismantle_req(req);
		io_put_task(req->task, 1);
		list_add(&req->compl.list, &cs->locked_free_list);
		cs->locked_free_nr++;
	} else {
		if (!percpu_ref_tryget(&ctx->refs))
			req = NULL;
	}
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (req) {
		io_cqring_ev_posted(ctx);
		percpu_ref_put(&ctx->refs);
	}
}

static void io_req_complete_state(struct io_kiocb *req, long res,
				  unsigned int cflags)
{
	io_clean_op(req);
	req->result = res;
	req->compl.cflags = cflags;
	req->flags |= REQ_F_COMPLETE_INLINE;
}

static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
				     long res, unsigned cflags)
{
	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
		io_req_complete_state(req, res, cflags);
	else
		io_req_complete_post(req, res, cflags);
}

static inline void io_req_complete(struct io_kiocb *req, long res)
{
	__io_req_complete(req, 0, res, 0);
}

static void io_req_complete_failed(struct io_kiocb *req, long res)
{
	req_set_fail_links(req);
	io_put_req(req);
	io_req_complete_post(req, res, 0);
}

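/*
 * Request allocation is served from a two-level cache: completions freed
 * from IRQ context accumulate on the locked free list, are spliced in bulk
 * to the submission-side free list here, and only when both are empty does
 * io_alloc_req() fall back to a batched kmem_cache allocation.
 */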
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_comp_state *cs = &state->comp;
	struct io_kiocb *req = NULL;

	/*
	 * If we have more than a batch's worth of requests in our IRQ side
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
		spin_lock_irq(&ctx->completion_lock);
		list_splice_init(&cs->locked_free_list, &cs->free_list);
		cs->locked_free_nr = 0;
		spin_unlock_irq(&ctx->completion_lock);
	}

	while (!list_empty(&cs->free_list)) {
		req = list_first_entry(&cs->free_list, struct io_kiocb,
					compl.list);
		list_del(&req->compl.list);
		state->reqs[state->free_reqs++] = req;
		if (state->free_reqs == ARRAY_SIZE(state->reqs))
			break;
	}

	return req != NULL;
}

static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));

	if (!state->free_reqs) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
		int ret;

		if (io_flush_cached_reqs(ctx))
			goto got_req;

		ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
					    state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				return NULL;
			ret = 1;
		}
		state->free_reqs = ret;
	}
got_req:
	state->free_reqs--;
	return state->reqs[state->free_reqs];
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (!fixed)
		fput(file);
}

static void io_dismantle_req(struct io_kiocb *req)
{
	io_clean_op(req);

	if (req->async_data)
		kfree(req->async_data);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	if (req->fixed_rsrc_refs)
		percpu_ref_put(req->fixed_rsrc_refs);
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
		req->flags &= ~REQ_F_INFLIGHT;
	}
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_idle)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}

static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_dismantle_req(req);
	io_put_task(req->task, 1);

	kmem_cache_free(req_cachep, req);
	percpu_ref_put(&ctx->refs);
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

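/*
 * Disarming a link: a completed or failed request must cancel the linked
 * timeout armed on it, and if the request itself failed, every request
 * still queued behind it in the chain is completed with -ECANCELED.
 */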
static bool io_kill_linked_timeout(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool cancelled = false;

	/*
	 * Can happen if a linked timeout fired and the link had been like
	 * req -> link t-out -> link t-out [-> ...]
	 */
	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
		struct io_timeout_data *io = link->async_data;
		int ret;

		io_remove_next_linked(req);
		link->timeout.head = NULL;
		ret = hrtimer_try_to_cancel(&io->timer);
		if (ret != -1) {
			io_cqring_fill_event(link, -ECANCELED);
			io_put_req_deferred(link, 1);
			cancelled = true;
		}
	}
	req->flags &= ~REQ_F_LINK_TIMEOUT;
	return cancelled;
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *nxt, *link = req->link;

	req->link = NULL;
	while (link) {
		nxt = link->link;
		link->link = NULL;

		trace_io_uring_fail_link(req, link);
		io_cqring_fill_event(link, -ECANCELED);
		io_put_req_deferred(link, 2);
		link = nxt;
	}
}

static bool io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	bool posted = false;

	if (likely(req->flags & REQ_F_LINK_TIMEOUT))
		posted = io_kill_linked_timeout(req);
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		posted |= (req->link != NULL);
		io_fail_links(req);
	}
	return posted;
}

static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;
		bool posted;

		spin_lock_irqsave(&ctx->completion_lock, flags);
		posted = io_disarm_next(req);
		if (posted)
			io_commit_cqring(req->ctx);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
		if (posted)
			io_cqring_ev_posted(ctx);
	}
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
		return NULL;
	return __io_req_find_next(req);
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx)
{
	if (!ctx)
		return;
	if (ctx->submit_state.comp.nr) {
		mutex_lock(&ctx->uring_lock);
		io_submit_flush_completions(&ctx->submit_state.comp, ctx);
		mutex_unlock(&ctx->uring_lock);
	}
	percpu_ref_put(&ctx->refs);
}

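/*
 * Drain the per-task work list. The list is snapshotted under task_lock and
 * then walked locklessly; requests are grouped by ring so that each ctx is
 * pinned once and has its deferred completions flushed when we move on to
 * the next one.
 */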
static bool __tctx_task_work(struct io_uring_task *tctx)
{
	struct io_ring_ctx *ctx = NULL;
	struct io_wq_work_list list;
	struct io_wq_work_node *node;

	if (wq_list_empty(&tctx->task_list))
		return false;

	spin_lock_irq(&tctx->task_lock);
	list = tctx->task_list;
	INIT_WQ_LIST(&tctx->task_list);
	spin_unlock_irq(&tctx->task_lock);

	node = list.first;
	while (node) {
		struct io_wq_work_node *next = node->next;
		struct io_kiocb *req;

		req = container_of(node, struct io_kiocb, io_task_work.node);
		if (req->ctx != ctx) {
			ctx_flush_and_put(ctx);
			ctx = req->ctx;
			percpu_ref_get(&ctx->refs);
		}

		req->task_work.func(&req->task_work);
		node = next;
	}

	ctx_flush_and_put(ctx);
	return list.first != NULL;
}

static void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);

	clear_bit(0, &tctx->task_state);

	while (__tctx_task_work(tctx))
		cond_resched();
}

static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
			    enum task_work_notify_mode notify)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_wq_work_node *node, *prev;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(!tctx);

	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
	spin_unlock_irqrestore(&tctx->task_lock, flags);

	/* task_work already pending, we're done */
	if (test_bit(0, &tctx->task_state) ||
	    test_and_set_bit(0, &tctx->task_state))
		return 0;

	if (!task_work_add(tsk, &tctx->task_work, notify))
		return 0;

	/*
	 * Slow path - we failed, find and delete work. If the work is not
	 * in the list, it got run and we're fine.
	 */
	ret = 0;
	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_for_each(node, prev, &tctx->task_list) {
		if (&req->io_task_work.node == node) {
			wq_list_del(&tctx->task_list, node, prev);
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&tctx->task_lock, flags);
	clear_bit(0, &tctx->task_state);
	return ret;
}

static int io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_ring_ctx *ctx = req->ctx;
	enum task_work_notify_mode notify;
	int ret;

	if (tsk->flags & PF_EXITING)
		return -ESRCH;

	/*
	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
	 * processing task_work. There's no reliable way to tell if TWA_RESUME
	 * will do the job.
	 */
	notify = TWA_NONE;
	if (!(ctx->flags & IORING_SETUP_SQPOLL))
		notify = TWA_SIGNAL;

	ret = io_task_work_add(tsk, req, notify);
	if (!ret)
		wake_up_process(tsk);

	return ret;
}

static bool io_run_task_work_head(struct callback_head **work_head)
{
	struct callback_head *work, *next;
	bool executed = false;

	do {
		work = xchg(work_head, NULL);
		if (!work)
			break;

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
		executed = true;
	} while (1);

	return executed;
}

static void io_task_work_add_head(struct callback_head **work_head,
				  struct callback_head *task_work)
{
	struct callback_head *head;

	do {
		head = READ_ONCE(*work_head);
		task_work->next = head;
	} while (cmpxchg(work_head, head, task_work) != head);
}

static void io_req_task_work_add_fallback(struct io_kiocb *req,
					  task_work_func_t cb)
{
	init_task_work(&req->task_work, cb);
	io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
}

static void __io_req_task_cancel(struct io_kiocb *req, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	io_cqring_fill_event(req, error);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_double_put_req(req);
}

static void io_req_task_cancel(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;

	/* ctx is guaranteed to stay alive while we hold uring_lock */
	mutex_lock(&ctx->uring_lock);
	__io_req_task_cancel(req, req->result);
	mutex_unlock(&ctx->uring_lock);
}

static void __io_req_task_submit(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
	mutex_lock(&ctx->uring_lock);
	if (!(current->flags & PF_EXITING) && !current->in_execve)
		__io_queue_sqe(req);
	else
		__io_req_task_cancel(req, -EFAULT);
	mutex_unlock(&ctx->uring_lock);
}

static void io_req_task_submit(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	__io_req_task_submit(req);
}

static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	req->result = ret;
	req->task_work.func = io_req_task_cancel;

	if (unlikely(io_req_task_work_add(req)))
		io_req_task_work_add_fallback(req, io_req_task_cancel);
}

static void io_req_task_queue(struct io_kiocb *req)
{
	req->task_work.func = io_req_task_submit;

	if (unlikely(io_req_task_work_add(req)))
		io_req_task_queue_fail(req, -ECANCELED);
}

static inline void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_req(struct io_kiocb *req)
{
	io_queue_next(req);
	__io_free_req(req);
}

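/*
 * State for freeing completed requests in batches: task references and ctx
 * references are accumulated and dropped once per batch instead of once per
 * request.
 */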
struct req_batch {
	struct task_struct *task;
	int task_refs;
	int ctx_refs;
};

static inline void io_init_req_batch(struct req_batch *rb)
{
	rb->task_refs = 0;
	rb->ctx_refs = 0;
	rb->task = NULL;
}

static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
				     struct req_batch *rb)
{
	if (rb->task)
		io_put_task(rb->task, rb->task_refs);
	if (rb->ctx_refs)
		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
}

static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
			      struct io_submit_state *state)
{
	io_queue_next(req);

	if (req->task != rb->task) {
		if (rb->task)
			io_put_task(rb->task, rb->task_refs);
		rb->task = req->task;
		rb->task_refs = 0;
	}
	rb->task_refs++;
	rb->ctx_refs++;

	io_dismantle_req(req);
	if (state->free_reqs != ARRAY_SIZE(state->reqs))
		state->reqs[state->free_reqs++] = req;
	else
		list_add(&req->compl.list, &state->comp.free_list);
}

static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx)
{
	int i, nr = cs->nr;
	struct io_kiocb *req;
	struct req_batch rb;

	io_init_req_batch(&rb);
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];
		__io_cqring_fill_event(req, req->result, req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];

		/* submission and completion refs */
		if (refcount_sub_and_test(2, &req->refs))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_req_free_batch_finish(ctx, &rb);
	cs->nr = 0;
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	if (refcount_dec_and_test(&req->refs)) {
		nxt = io_req_find_next(req);
		__io_free_req(req);
	}
	return nxt;
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

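/*
 * Deferred puts: for contexts where freeing the request directly is not
 * safe (e.g. while holding completion_lock), the final free is bounced to
 * task_work instead.
 */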
static void io_put_req_deferred_cb(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	io_free_req(req);
}

static void io_free_req_deferred(struct io_kiocb *req)
{
	int ret;

	req->task_work.func = io_put_req_deferred_cb;
	ret = io_req_task_work_add(req);
	if (unlikely(ret))
		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
}

static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
{
	if (refcount_sub_and_test(refs, &req->refs))
		io_free_req_deferred(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
{
	unsigned int cflags;

	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(kbuf);
	return cflags;
}

static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	return io_put_kbuf(req, kbuf);
}

static inline bool io_run_task_work(void)
{
	/*
	 * Not safe to run on exiting task, and the task_work handling will
	 * not add work to such a task.
	 */
	if (unlikely(current->flags & PF_EXITING))
		return false;
	if (current->task_works) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return true;
	}

	return false;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;

	/* order with ->result store in io_complete_rw_iopoll() */
	smp_rmb();

	io_init_req_batch(&rb);
	while (!list_empty(done)) {
		int cflags = 0;

		req = list_first_entry(done, struct io_kiocb, inflight_entry);
		list_del(&req->inflight_entry);

		if (READ_ONCE(req->result) == -EAGAIN) {
			req->iopoll_completed = 0;
			if (io_rw_reissue(req))
				continue;
		}

		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_rw_kbuf(req);

		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	io_req_free_batch_finish(ctx, &rb);
}

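/*
 * One pass of the IOPOLL reaping loop: poll each inflight request's driver
 * queue, collect everything that has completed onto a local 'done' list,
 * and post their CQEs in one batch.
 */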
Jens Axboedef596e2019-01-09 08:59:42 -07002267static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2268 long min)
2269{
2270 struct io_kiocb *req, *tmp;
2271 LIST_HEAD(done);
2272 bool spin;
2273 int ret;
2274
2275 /*
2276 * Only spin for completions if we don't have multiple devices hanging
2277 * off our complete list, and we're under the requested amount.
2278 */
2279 spin = !ctx->poll_multi_file && *nr_events < min;
2280
2281 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002282 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002283 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002284
2285 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002286 * Move completed and retryable entries to our local lists.
2287 * If we find a request that requires polling, break out
2288 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002289 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002290 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002291 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002292 continue;
2293 }
2294 if (!list_empty(&done))
2295 break;
2296
2297 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2298 if (ret < 0)
2299 break;
2300
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002301 /* iopoll may have completed current req */
2302 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002303 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002304
Jens Axboedef596e2019-01-09 08:59:42 -07002305 if (ret && spin)
2306 spin = false;
2307 ret = 0;
2308 }
2309
2310 if (!list_empty(&done))
2311 io_iopoll_complete(ctx, nr_events, &done);
2312
2313 return ret;
2314}
2315
2316/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08002317 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07002318 * non-spinning poll check - we'll still enter the driver poll loop, but only
2319 * as a non-spinning completion check.
2320 */
2321static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2322 long min)
2323{
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002324 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07002325 int ret;
2326
2327 ret = io_do_iopoll(ctx, nr_events, min);
2328 if (ret < 0)
2329 return ret;
Pavel Begunkoveba0a4d2020-07-06 17:59:30 +03002330 if (*nr_events >= min)
Jens Axboedef596e2019-01-09 08:59:42 -07002331 return 0;
2332 }
2333
2334 return 1;
2335}
2336
2337/*
2338 * We can't just wait for polled events to come to us, we have to actively
2339 * find and complete them.
2340 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002341static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002342{
2343 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2344 return;
2345
2346 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002347 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002348 unsigned int nr_events = 0;
2349
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002350 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002351
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002352 /* let it sleep and repeat later if can't complete a request */
2353 if (nr_events == 0)
2354 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002355 /*
2356 * Ensure we allow local-to-the-cpu processing to take place,
2357 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002358 * Also let task_work, etc. to progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002359 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002360 if (need_resched()) {
2361 mutex_unlock(&ctx->uring_lock);
2362 cond_resched();
2363 mutex_lock(&ctx->uring_lock);
2364 }
Jens Axboedef596e2019-01-09 08:59:42 -07002365 }
2366 mutex_unlock(&ctx->uring_lock);
2367}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (test_bit(0, &ctx->cq_check_overflow))
			__io_cqring_overflow_flush(ctx, false, NULL, NULL);
		if (io_cqring_events(ctx))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			io_run_task_work();
			mutex_lock(&ctx->uring_lock);
		}

		ret = io_iopoll_getevents(ctx, &nr_events, min);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}
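
/*
 * Editor's sketch (not part of the kernel source): the userspace view of
 * the IOPOLL path above, assuming liburing is available and that the file
 * lives on storage whose driver implements ->iopoll (e.g. NVMe). The ring
 * must be created with IORING_SETUP_IOPOLL and the file opened O_DIRECT,
 * as io_prep_rw() below enforces; waiting for the CQE then busy-polls the
 * driver through io_iopoll_check() instead of sleeping on an IRQ-driven
 * completion. Error handling is omitted for brevity.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <liburing.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		struct io_uring ring;
 *		struct io_uring_sqe *sqe;
 *		struct io_uring_cqe *cqe;
 *		void *buf;
 *		int fd, res;
 *
 *		io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *		fd = open("data.bin", O_RDONLY | O_DIRECT);
 *		posix_memalign(&buf, 4096, 4096);
 *		sqe = io_uring_get_sqe(&ring);
 *		io_uring_prep_read(sqe, fd, buf, 4096, 0);
 *		io_uring_submit(&ring);
 *		io_uring_wait_cqe(&ring, &cqe);	// reaps by polling, not IRQ
 *		res = cqe->res;
 *		io_uring_cqe_seen(&ring, cqe);
 *		return res < 0;
 *	}
 */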

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	int rw, ret;
	struct iov_iter iter;

	/* already prepared */
	if (req->async_data)
		return true;

	switch (req->opcode) {
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		rw = READ;
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		rw = WRITE;
		break;
	default:
		printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
				req->opcode);
		return false;
	}

	ret = io_import_iovec(rw, req, &iovec, &iter, false);
	if (ret < 0)
		return false;
	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	return true;
}
#else
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static bool io_rw_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	if (!io_rw_should_reissue(req))
		return false;

	lockdep_assert_held(&req->ctx->uring_lock);

	if (io_resubmit_prep(req)) {
		refcount_inc(&req->refs);
		io_queue_async_work(req);
		return true;
	}
	req_set_fail_links(req);
#endif
	return false;
}

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     unsigned int issue_flags)
{
	int cflags = 0;

	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE;
		return;
	}
	if (res != req->result)
		req_set_fail_links(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_rw_kbuf(req);
	__io_req_complete(req, issue_flags, res, cflags);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	__io_complete_rw(req, res, res2, 0);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

#ifdef CONFIG_BLOCK
	/* Rewind iter, if we have one. iopoll path resubmits as usual */
	if (res == -EAGAIN && io_rw_should_reissue(req)) {
		struct io_async_rw *rw = req->async_data;

		if (rw)
			iov_iter_revert(&rw->iter,
					req->result - iov_iter_count(&rw->iter));
		else if (!io_resubmit_prep(req))
			res = -EIO;
	}
#endif

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != -EAGAIN && res != req->result)
		req_set_fail_links(req);

	WRITE_ONCE(req->result, res);
	/* order with io_poll_complete() checking ->result */
	smp_wmb();
	WRITE_ONCE(req->iopoll_completed, 1);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
						inflight_entry);
		if (list_req->file != req->file)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		list_add(&req->inflight_entry, &ctx->iopoll_list);
	else
		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

	/*
	 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq
	 * thread task context or in io worker task context. If the current
	 * task context is the sq thread, we don't need to check whether we
	 * should wake it up.
	 */
	if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
	    wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
}

static inline void io_state_file_put(struct io_submit_state *state)
{
	if (state->file_refs) {
		fput_many(state->file, state->file_refs);
		state->file_refs = 0;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *__io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file_refs) {
		if (state->fd == fd) {
			state->file_refs--;
			return state->file;
		}
		io_state_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (unlikely(!state->file))
		return NULL;

	state->fd = fd;
	state->file_refs = state->ios_left - 1;
	return state->file;
}

static bool io_bdev_nowait(struct block_device *bdev)
{
	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file, int rw)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
			return true;
		return false;
	}
	if (S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
		    file->f_op != &io_uring_fops)
			return true;
		return false;
	}

	/* any ->read/write should understand O_NONBLOCK */
	if (file->f_flags & O_NONBLOCK)
		return true;

	if (!(file->f_mode & FMODE_NOWAIT))
		return false;

	if (rw == READ)
		return file->f_op->read_iter != NULL;

	return file->f_op->write_iter != NULL;
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	req->buf_index = READ_ONCE(sqe->buf_index);
	return 0;
}
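
/*
 * Editor's sketch (not part of the kernel source): the SQE fields that
 * io_prep_rw() above consumes, filled by hand rather than through an
 * io_uring_prep_*() helper; liburing is assumed only for ring setup and
 * submission, and "fd"/"buf" are placeholders. Note that a buffered read
 * with RWF_NOWAIT set may simply complete with -EAGAIN in cqe->res, since
 * REQ_F_NOWAIT forbids the async punt.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	= IORING_OP_READ;
 *	sqe->fd		= fd;			// becomes req->file
 *	sqe->off	= -1;			// use/update f_pos (REQ_F_CUR_POS)
 *	sqe->addr	= (unsigned long) buf;	// req->rw.addr
 *	sqe->len	= 4096;			// req->rw.len
 *	sqe->rw_flags	= RWF_NOWAIT;		// kiocb_set_rw_flags() -> REQ_F_NOWAIT
 *	sqe->ioprio	= 0;			// 0 means get_current_ioprio()
 *	sqe->user_data	= 0x1234;
 *	io_uring_submit(&ring);
 */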

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
	struct io_async_rw *io = req->async_data;
	bool check_reissue = kiocb->ki_complete == io_complete_rw;

	/* add previously done IO, if any */
	if (io && io->bytes_done > 0) {
		if (ret < 0)
			ret = io->bytes_done;
		else
			ret += io->bytes_done;
	}

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
		__io_complete_rw(req, ret, 0, issue_flags);
	else
		io_rw_done(kiocb, ret);

	if (check_reissue && req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (!io_rw_reissue(req)) {
			int cflags = 0;

			req_set_fail_links(req);
			if (req->flags & REQ_F_BUFFER_SELECTED)
				cflags = io_put_rw_kbuf(req);
			__io_req_complete(req, issue_flags, ret, cflags);
		}
	}
}

static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	u16 index, buf_index = req->buf_index;
	size_t offset;
	u64 buf_addr;

	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
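
/*
 * Editor's sketch (not part of the kernel source): the userspace side of
 * the fixed-buffer import above, assuming liburing. The buffer is
 * registered once up front; IORING_OP_READ_FIXED then carries buf_index
 * in the SQE, and addr/len may point anywhere inside the registered
 * region - exactly the non-zero offset case io_import_fixed() handles.
 *
 *	struct iovec reg = { .iov_base = buf, .iov_len = 64 * 1024 };
 *
 *	io_uring_register_buffers(&ring, &reg, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	// read 1 KiB into the middle of registered buffer 0
 *	io_uring_prep_read_fixed(sqe, fd, (char *) buf + 8192, 1024, 0, 0);
 *	io_uring_submit(&ring);
 */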

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = xa_load(&req->ctx->io_buffers, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
							list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			xa_erase(&req->ctx->io_buffers, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}

static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
					bool needs_lock)
{
	struct io_buffer *kbuf;
	u16 bgid;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	bgid = req->buf_index;
	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;
	req->rw.addr = (u64) (unsigned long) kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return u64_to_user_ptr(kbuf->addr);
}

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				bool needs_lock)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	ssize_t len;

	uiov = u64_to_user_ptr(req->rw.addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      bool needs_lock)
{
	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    bool needs_lock)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		struct io_buffer *kbuf;

		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov[0].iov_len = kbuf->len;
		return 0;
	}
	if (req->rw.len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, needs_lock);
#endif

	return __io_iov_buffer_select(req, iov, needs_lock);
}
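
/*
 * Editor's sketch (not part of the kernel source): using provided buffers
 * from userspace, assuming liburing. A group of buffers is handed to the
 * kernel first; IOSQE_BUFFER_SELECT then lets io_buffer_select() above
 * pick one at issue time, and the chosen buffer ID comes back in
 * cqe->flags.
 *
 *	// hand the kernel four 4 KiB buffers in group 7, ids 0..3
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, pool, 4096, 4, 7, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);	// buffer chosen at issue
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 7;
 *	io_uring_submit(&ring);
 *
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */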

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	u8 opcode = req->opcode;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (req->flags & REQ_F_BUFFER_SELECT) {
			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
			if (IS_ERR(buf))
				return PTR_ERR(buf);
			req->rw.len = sqe_len;
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, *iovec, needs_lock);
		if (!ret)
			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
			      req->ctx->compat);
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
{
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
			iovec.iov_len = req->rw.len;
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, io_kiocb_ppos(kiocb));
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, io_kiocb_ppos(kiocb));
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		req->rw.len -= nr;
		req->rw.addr += nr;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *rw = req->async_data;

	memcpy(&rw->iter, iter, sizeof(*iter));
	rw->free_iovec = iovec;
	rw->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		rw->iter.iov = rw->fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			rw->iter.iov += iov_off;
		}
		if (rw->fast_iov != fast_iov)
			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static inline int __io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
	return req->async_data == NULL;
}

static int io_alloc_async_data(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].needs_async_data)
		return 0;

	return __io_alloc_async_data(req);
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force)
{
	if (!force && !io_op_defs[req->opcode].needs_async_data)
		return 0;
	if (!req->async_data) {
		if (__io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, fast_iov, iter);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov = iorw->fast_iov;
	int ret;

	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;
	return io_prep_rw(req, sqe);
}

/*
 * This is our waitqueue callback handler, registered through lock_page_async()
 * when we initially tried to do the IO with the iocb that armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);

	/* submit ref gets dropped, acquire a new one */
	refcount_inc(&req->refs);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;
	struct wait_page_queue *wait = &rw->wpq;
	struct kiocb *kiocb = &req->rw.kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{
	if (req->file->f_op->read_iter)
		return call_read_iter(req->file, &req->rw.kiocb, iter);
	else if (req->file->f_op->read)
		return loop_rw_iter(READ, req, iter);
	else
		return -EINVAL;
}

static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t io_size, ret, ret2;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, READ)) {
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
		return ret ?: -EAGAIN;
	}

	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(req, iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		goto out_free;
	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	rw = req->async_data;
	/* now use our persistent iterator, if we aren't already */
	iter = &rw->iter;

	do {
		io_size -= ret;
		rw->bytes_done += ret;
		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(req, iter);
		if (ret == -EIOCBQUEUED)
			return 0;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
	} while (ret > 0 && ret < io_size);
done:
	kiocb_done(kiocb, ret, issue_flags);
out_free:
	/* it's faster to check here than to delegate to kfree() */
	if (iovec)
		kfree(iovec);
	return 0;
}
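
/*
 * Editor's sketch (not part of the kernel source): a vectored read driving
 * io_read() above with a two-segment iovec, assuming liburing. A buffered
 * read that can't complete inline is retried through the IOCB_WAITQ
 * machinery transparently; userspace only ever sees the final CQE.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = 64 },
 *		{ .iov_base = body, .iov_len = 4032 },
 *	};
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, iov, 2, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	// cqe->res = bytes read or -errno
 *	io_uring_cqe_seen(&ring, cqe);
 */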

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;
	return io_prep_rw(req, sqe);
}

static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t ret, ret2, io_size;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (req->file->f_op->write_iter)
		ret2 = call_write_iter(req->file, kiocb, iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
			goto copy_iov;
done:
		kiocb_done(kiocb, ret2, issue_flags);
	} else {
copy_iov:
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
		return ret ?: -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static int io_renameat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = &req->rename;
	const char __user *oldf, *newf;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rename *ren = &req->rename;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_unlinkat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = &req->unlink;
	const char __user *fname;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_unlink *un = &req->unlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
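
/*
 * Editor's sketch (not part of the kernel source): driving the rename and
 * unlink opcodes above from userspace, assuming a liburing new enough to
 * carry these helpers (the opcodes landed in 5.11). Both handlers bail
 * with -EAGAIN under IO_URING_F_NONBLOCK, so the work always runs from
 * io-wq rather than blocking the submitter.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt",
 *			       AT_FDCWD, "new.txt", 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_unlinkat(sqe, AT_FDCWD, "stale.txt", 0);
 *	io_uring_submit(&ring);	// two CQEs follow, res = 0 or -errno
 */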

static int io_shutdown_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
	    sqe->buf_index)
		return -EINVAL;

	req->shutdown.how = READ_ONCE(sqe->len);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, req->shutdown.how);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
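
/*
 * Userspace sketch (assuming liburing's io_uring_prep_shutdown(); not
 * part of this file): the "how" argument travels in sqe->len, so
 * SHUT_RD/SHUT_WR/SHUT_RDWR are passed exactly as for shutdown(2).
 *
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 */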

static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->file_in = NULL;
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
				  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (!sp->file_in)
		return -EBADF;
	req->flags |= REQ_F_NEED_CLEANUP;

	if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
		/*
		 * The splice operation will be punted async, and we need
		 * to modify io_wq_work.flags here, so initialize
		 * io_wq_work first.
		 */
		req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
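
/*
 * Userspace sketch (assuming liburing's io_uring_prep_splice(); not
 * part of this file): as io_splice() shows above, an offset of -1
 * means "use the file's current position", mirroring a NULL offset
 * pointer for splice(2). Copying 4KiB might look like:
 *
 *	io_uring_prep_splice(sqe, fd_in, -1, fd_out, -1, 4096, 0);
 */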

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, issue_flags, 0, 0);
	return 0;
}

static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
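
/*
 * Userspace sketch (assuming liburing's io_uring_prep_fsync(); not
 * part of this file): the only accepted flag is IORING_FSYNC_DATASYNC,
 * which selects fdatasync()-like behaviour; off/len may bound the
 * range synced, as io_fsync() above passes them to vfs_fsync_range().
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 */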

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
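
/*
 * Note the SQE field mapping in the prep handler above: the fallocate
 * length rides in sqe->addr and the mode in sqe->len, not the other
 * way around. Userspace sketch (assuming liburing's
 * io_uring_prep_fallocate(); not part of this file):
 *
 *	io_uring_prep_fallocate(sqe, fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 */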

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should already be initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}
	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 flags, mode;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	mode = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->open_flags);
	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct open_flags op;
	struct file *file;
	bool nonblock_set;
	bool resolve_nonblock;
	int ret;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always return -EAGAIN.
		 */
		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	/* only retry if RESOLVE_CACHED wasn't already set by application */
	if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
	    file == ERR_PTR(-EAGAIN)) {
		/*
		 * We could hang on to this 'fd', but seems like marginal
		 * gain for something that is now known to be a slower path.
		 * So just put it, and we'll get a new one when we retry.
		 */
		put_unused_fd(ret);
		return -EAGAIN;
	}

	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
			file->f_flags &= ~O_NONBLOCK;
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
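
/*
 * Userspace sketch (assuming liburing's io_uring_prep_openat2(); not
 * part of this file): an open that must never be punted to a worker
 * thread can request the cached dcache-only lookup attempted above.
 *
 *	struct open_how how = { .flags = O_RDONLY,
 *				.resolve = RESOLVE_CACHED };
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.bin", &how);
 *
 * If the dentry isn't cached, the request completes with -EAGAIN
 * rather than blocking, since the retry path above is skipped when the
 * application set RESOLVE_CACHED itself.
 */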

static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
	}
	i++;
	kfree(buf);
	xa_erase(&ctx->io_buffers, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = xa_load(&ctx->io_buffers, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
	if (ret < 0)
		req_set_fail_links(req);

	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, !force_nonblock);
	return 0;
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	unsigned long size;
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = pbuf->len;
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}

static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = xa_load(&ctx->io_buffers, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret >= 0 && !list) {
		ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
		if (ret < 0)
			__io_remove_buffers(ctx, head, p->bgid, -1U);
	}
	if (ret < 0)
		req_set_fail_links(req);
	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, !force_nonblock);
	return 0;
}
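
/*
 * Userspace sketch (assuming liburing's io_uring_prep_provide_buffers();
 * not part of this file): register 8 buffers of 4KiB in group 1 with
 * IDs starting at 0, so requests submitted with IOSQE_BUFFER_SELECT can
 * pick from them. The prep handler above caps both the buffer count and
 * the starting ID at USHRT_MAX.
 *
 *	static char bufs[8][4096];
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 1, 0);
 */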

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
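
/*
 * Userspace sketch (assuming liburing's io_uring_prep_epoll_ctl(); not
 * part of this file): add a socket to an existing epoll set without a
 * separate epoll_ctl(2) call.
 *
 *	struct epoll_event ev = { .events = EPOLLIN,
 *				  .data.fd = sockfd };
 *
 *	io_uring_prep_epoll_ctl(sqe, epfd, sockfd, EPOLL_CTL_ADD, &ev);
 */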

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
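
/*
 * Note the split in io_fadvise() above: POSIX_FADV_NORMAL, _RANDOM and
 * _SEQUENTIAL only adjust readahead hints and are serviced inline even
 * in nonblocking context, while the remaining advice values (e.g.
 * POSIX_FADV_DONTNEED) may do real work and get punted via -EAGAIN.
 * Userspace sketch (assuming liburing's io_uring_prep_fadvise(); not
 * part of this file):
 *
 *	io_uring_prep_fadvise(sqe, fd, 0, file_len, POSIX_FADV_SEQUENTIAL);
 */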

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->statx.dfd = READ_ONCE(sqe->fd);
	req->statx.mask = READ_ONCE(sqe->len);
	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->statx.flags = READ_ONCE(sqe->statx_flags);

	return 0;
}

static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_statx *ctx = &req->statx;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		/* only need file table for an actual valid fd */
		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
			req->flags |= REQ_F_NO_FILE_TABLE;
		return -EAGAIN;
	}

	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
		       ctx->buffer);

	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}
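
/*
 * Userspace sketch (assuming liburing's io_uring_prep_statx(); not
 * part of this file): io_statx() above always punts to a blocking
 * context, so the statx buffer must remain valid until the CQE arrives.
 *
 *	struct statx stx;
 *
 *	io_uring_prep_statx(sqe, AT_FDCWD, "file.txt", 0,
 *			    STATX_SIZE, &stx);
 */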

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	return 0;
}

static int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = &req->close;
	struct fdtable *fdt;
	struct file *file;
	int ret;

	file = NULL;
	ret = -EBADF;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = fdt->fd[close->fd];
	if (!file) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	if (file->f_op == &io_uring_fops) {
		spin_unlock(&files->file_lock);
		file = NULL;
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	ret = __close_fd_get_file(close->fd, &file);
	spin_unlock(&files->file_lock);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = -EBADF;
		goto err;
	}

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail_links(req);
	if (file)
		fput(file);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
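
/*
 * Note that io_close() above refuses to close an io_uring file
 * (f_op == &io_uring_fops): such a request completes with -EBADF,
 * presumably so a ring cannot be torn down from inside its own
 * submission path.
 */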

static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* sync_file_range always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

#if defined(CONFIG_NET)
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	if (io_alloc_async_data(req)) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	async_msg = req->async_data;
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, point the iterator at the new copy */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
				   req->sr_msg.msg_flags, &iomsg->free_iov);
}

static int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_op_defs[req->opcode].needs_async_data)
		return 0;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = &req->sr_msg;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < min_ret)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
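
/*
 * Userspace sketch (assuming liburing's io_uring_prep_sendmsg(); not
 * part of this file): the msghdr and its iovecs must stay stable until
 * the request is issued, since io_sendmsg_copy_hdr() reads them then.
 * With MSG_WAITALL, a short send (ret < min_ret above) marks the
 * request as failed.
 *
 *	struct iovec iov = { .iov_base = payload, .iov_len = payload_len };
 *	struct msghdr mhdr = { .msg_iov = &iov, .msg_iovlen = 1 };
 *
 *	io_uring_prep_sendmsg(sqe, sockfd, &mhdr, MSG_WAITALL);
 */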

static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (ret < min_ret)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
				      &iomsg->uaddr, &uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len > 1)
			return -EINVAL;
		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
			return -EFAULT;
		sr->len = iomsg->fast_iov[0].iov_len;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct compat_msghdr __user *msg_compat;
	struct io_sr_msg *sr = &req->sr_msg;
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	msg_compat = (struct compat_msghdr __user *) sr->umsg;
	ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
				  &ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
					       bool needs_lock)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_buffer *kbuf;

	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;

	sr->kbuf = kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return kbuf;
}

static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
{
	return io_put_kbuf(req, req->sr_msg.kbuf);
}

static int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_op_defs[req->opcode].needs_async_data)
		return 0;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = &req->sr_msg;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->bgid = READ_ONCE(sqe->buf_group);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	struct io_buffer *kbuf;
	unsigned flags;
	int min_ret = 0;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		kmsg->fast_iov[0].iov_len = req->sr_msg.len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
				1, req->sr_msg.len);
	}

	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
				 kmsg->uaddr, flags);
	if (force_nonblock && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}
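
/*
 * Userspace sketch (assuming liburing and the IORING_CQE_BUFFER_SHIFT
 * uapi constant; not part of this file): with IOSQE_BUFFER_SELECT the
 * kernel picks one of the buffers provided to the request's group, as
 * io_recv_buffer_select() does above, and reports the chosen buffer ID
 * in the CQE flags.
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	...
 *	bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */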

static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_buffer *kbuf;
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	void __user *buf = sr->buf;
	struct socket *sock;
	struct iovec iov;
	unsigned flags;
	int min_ret = 0;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		buf = u64_to_user_ptr(kbuf->addr);
	}

	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_iocb = NULL;
	msg.msg_flags = 0;

	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out_free:
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = &req->accept;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	int ret;

	if (req->file->f_flags & O_NONBLOCK)
		req->flags |= REQ_F_NOWAIT;

	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags,
					accept->nofile);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret < 0) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail_links(req);
	}
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
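
/*
 * Userspace sketch (assuming liburing's io_uring_prep_accept(); not
 * part of this file): as with accept4(2), the sockaddr and its length
 * must live for the duration of the request; the CQE result is the new
 * file descriptor.
 *
 *	struct sockaddr_storage ss;
 *	socklen_t ss_len = sizeof(ss);
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&ss,
 *			     &ss_len, SOCK_CLOEXEC);
 */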
4765
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004766static int io_connect_prep_async(struct io_kiocb *req)
4767{
4768 struct io_async_connect *io = req->async_data;
4769 struct io_connect *conn = &req->connect;
4770
4771 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4772}
4773
Jens Axboe3529d8c2019-12-19 18:24:38 -07004774static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004775{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004776 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004777
Jens Axboe14587a462020-09-05 11:36:08 -06004778 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004779 return -EINVAL;
4780 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4781 return -EINVAL;
4782
Jens Axboe3529d8c2019-12-19 18:24:38 -07004783 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4784 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004785 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004786}
4787
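/*
 * Issue IORING_OP_CONNECT. If a nonblocking attempt returns -EAGAIN or
 * -EINPROGRESS, the kernel copy of the address is stashed in async data
 * so the retry doesn't have to re-read application memory that may have
 * changed in the meantime.
 */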
Pavel Begunkov889fca72021-02-10 00:03:09 +00004788static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004789{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004790 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004791 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004792 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004793 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004794
Jens Axboee8c2bc12020-08-15 18:44:09 -07004795 if (req->async_data) {
4796 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004797 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004798 ret = move_addr_to_kernel(req->connect.addr,
4799 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004800 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004801 if (ret)
4802 goto out;
4803 io = &__io;
4804 }
4805
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004806 file_flags = force_nonblock ? O_NONBLOCK : 0;
4807
Jens Axboee8c2bc12020-08-15 18:44:09 -07004808 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004809 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004810 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004811 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004812 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004813 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004814 ret = -ENOMEM;
4815 goto out;
4816 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004817 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004818 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004819 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004820 if (ret == -ERESTARTSYS)
4821 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004822out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004823 if (ret < 0)
4824 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004825 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004826 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004827}
YueHaibing469956e2020-03-04 15:53:52 +08004828#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004829#define IO_NETOP_FN(op) \
4830static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4831{ \
4832 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004833}
4834
Jens Axboe99a10082021-02-19 09:35:19 -07004835#define IO_NETOP_PREP(op) \
4836IO_NETOP_FN(op) \
4837static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4838{ \
4839 return -EOPNOTSUPP; \
4840} \
4841
4842#define IO_NETOP_PREP_ASYNC(op) \
4843IO_NETOP_PREP(op) \
4844static int io_##op##_prep_async(struct io_kiocb *req) \
4845{ \
4846 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004847}
4848
Jens Axboe99a10082021-02-19 09:35:19 -07004849IO_NETOP_PREP_ASYNC(sendmsg);
4850IO_NETOP_PREP_ASYNC(recvmsg);
4851IO_NETOP_PREP_ASYNC(connect);
4852IO_NETOP_PREP(accept);
4853IO_NETOP_FN(send);
4854IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004855#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004856
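/*
 * Passed through vfs_poll() so the queue proc callbacks can find the
 * request being armed and report queueing errors back to the caller.
 */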
Jens Axboed7718a92020-02-14 22:23:12 -07004857struct io_poll_table {
4858 struct poll_table_struct pt;
4859 struct io_kiocb *req;
4860 int error;
4861};
4862
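/*
 * Common waitqueue wake handling for poll and async poll: on an event
 * match, unhook the wait entry and punt completion to task context via
 * task_work.
 */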
Jens Axboed7718a92020-02-14 22:23:12 -07004863static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4864 __poll_t mask, task_work_func_t func)
4865{
Jens Axboeaa96bf82020-04-03 11:26:26 -06004866 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004867
4868	/* for instances that support it, check for an event match first */
4869 if (mask && !(mask & poll->events))
4870 return 0;
4871
4872 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4873
4874 list_del_init(&poll->wait.entry);
4875
Jens Axboed7718a92020-02-14 22:23:12 -07004876 req->result = mask;
Jens Axboe7cbf1722021-02-10 00:03:20 +00004877 req->task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004878 percpu_ref_get(&req->ctx->refs);
4879
Jens Axboed7718a92020-02-14 22:23:12 -07004880 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004881 * If this fails, then the task is exiting. When a task exits, the
4882 * work gets canceled, so just cancel this request as well instead
4883 * of executing it. We can't safely execute it anyway, as we may not
4884	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07004885 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06004886 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004887 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004888 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00004889 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004890 }
Jens Axboed7718a92020-02-14 22:23:12 -07004891 return 1;
4892}
4893
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004894static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4895 __acquires(&req->ctx->completion_lock)
4896{
4897 struct io_ring_ctx *ctx = req->ctx;
4898
4899 if (!req->result && !READ_ONCE(poll->canceled)) {
4900 struct poll_table_struct pt = { ._key = poll->events };
4901
4902 req->result = vfs_poll(req->file, &pt) & poll->events;
4903 }
4904
4905 spin_lock_irq(&ctx->completion_lock);
4906 if (!req->result && !READ_ONCE(poll->canceled)) {
4907 add_wait_queue(poll->head, &poll->wait);
4908 return true;
4909 }
4910
4911 return false;
4912}
4913
Jens Axboed4e7cd32020-08-15 11:44:50 -07004914static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004915{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004916 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004917 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004918 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004919 return req->apoll->double_poll;
4920}
4921
4922static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4923{
4924 if (req->opcode == IORING_OP_POLL_ADD)
4925 return &req->poll;
4926 return &req->apoll->poll;
4927}
4928
4929static void io_poll_remove_double(struct io_kiocb *req)
4930{
4931 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004932
4933 lockdep_assert_held(&req->ctx->completion_lock);
4934
4935 if (poll && poll->head) {
4936 struct wait_queue_head *head = poll->head;
4937
4938 spin_lock(&head->lock);
4939 list_del_init(&poll->wait.entry);
4940 if (poll->wait.private)
4941 refcount_dec(&req->refs);
4942 poll->head = NULL;
4943 spin_unlock(&head->lock);
4944 }
4945}
4946
4947static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4948{
4949 struct io_ring_ctx *ctx = req->ctx;
4950
Jens Axboed4e7cd32020-08-15 11:44:50 -07004951 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004952 req->poll.done = true;
4953 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4954 io_commit_cqring(ctx);
4955}
4956
Jens Axboe18bceab2020-05-15 11:56:54 -06004957static void io_poll_task_func(struct callback_head *cb)
4958{
4959 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004960 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004961 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004962
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004963 if (io_poll_rewait(req, &req->poll)) {
4964 spin_unlock_irq(&ctx->completion_lock);
4965 } else {
4966 hash_del(&req->hash_node);
4967 io_poll_complete(req, req->result, 0);
4968 spin_unlock_irq(&ctx->completion_lock);
4969
4970 nxt = io_put_req_find_next(req);
4971 io_cqring_ev_posted(ctx);
4972 if (nxt)
4973 __io_req_task_submit(nxt);
4974 }
4975
Jens Axboe6d816e02020-08-11 08:04:14 -06004976 percpu_ref_put(&ctx->refs);
Jens Axboe18bceab2020-05-15 11:56:54 -06004977}
4978
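/*
 * Wake handler for the second poll entry, used when the polled file has
 * more than one waitqueue (e.g. sockets with separate read/write queues).
 */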
4979static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4980 int sync, void *key)
4981{
4982 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004983 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004984 __poll_t mask = key_to_poll(key);
4985
4986	/* for instances that support it, check for an event match first */
4987 if (mask && !(mask & poll->events))
4988 return 0;
4989
Jens Axboe8706e042020-09-28 08:38:54 -06004990 list_del_init(&wait->entry);
4991
Jens Axboe807abcb2020-07-17 17:09:27 -06004992 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004993 bool done;
4994
Jens Axboe807abcb2020-07-17 17:09:27 -06004995 spin_lock(&poll->head->lock);
4996 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004997 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004998 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004999 /* make sure double remove sees this as being gone */
5000 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06005001 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06005002 if (!done) {
5003			/* use the wait func handler so it matches the request type */
5004 poll->wait.func(&poll->wait, mode, sync, key);
5005 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005006 }
5007 refcount_dec(&req->refs);
5008 return 1;
5009}
5010
5011static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5012 wait_queue_func_t wake_func)
5013{
5014 poll->head = NULL;
5015 poll->done = false;
5016 poll->canceled = false;
5017 poll->events = events;
5018 INIT_LIST_HEAD(&poll->wait.entry);
5019 init_waitqueue_func_entry(&poll->wait, wake_func);
5020}
5021
5022static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005023 struct wait_queue_head *head,
5024 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005025{
5026 struct io_kiocb *req = pt->req;
5027
5028 /*
5029 * If poll->head is already set, it's because the file being polled
5030	 * uses multiple waitqueues for poll handling (e.g. one for read, one
5031	 * for write). Set up a separate io_poll_iocb if this happens.
5032 */
5033 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005034 struct io_poll_iocb *poll_one = poll;
5035
Jens Axboe18bceab2020-05-15 11:56:54 -06005036 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005037 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005038 pt->error = -EINVAL;
5039 return;
5040 }
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005041 /* double add on the same waitqueue head, ignore */
5042 if (poll->head == head)
5043 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005044 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5045 if (!poll) {
5046 pt->error = -ENOMEM;
5047 return;
5048 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005049 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboe18bceab2020-05-15 11:56:54 -06005050 refcount_inc(&req->refs);
5051 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005052 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005053 }
5054
5055 pt->error = 0;
5056 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005057
5058 if (poll->events & EPOLLEXCLUSIVE)
5059 add_wait_queue_exclusive(head, &poll->wait);
5060 else
5061 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005062}
5063
5064static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5065 struct poll_table_struct *p)
5066{
5067 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005068 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005069
Jens Axboe807abcb2020-07-17 17:09:27 -06005070 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005071}
5072
Jens Axboed7718a92020-02-14 22:23:12 -07005073static void io_async_task_func(struct callback_head *cb)
5074{
5075 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5076 struct async_poll *apoll = req->apoll;
5077 struct io_ring_ctx *ctx = req->ctx;
5078
5079 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5080
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005081 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005082 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe6d816e02020-08-11 08:04:14 -06005083 percpu_ref_put(&ctx->refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005084 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005085 }
5086
Jens Axboe31067252020-05-17 17:43:31 -06005087 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005088 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005089 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005090
Jens Axboed4e7cd32020-08-15 11:44:50 -07005091 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005092 spin_unlock_irq(&ctx->completion_lock);
5093
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005094 if (!READ_ONCE(apoll->poll.canceled))
5095 __io_req_task_submit(req);
5096 else
5097 __io_req_task_cancel(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005098
Jens Axboe6d816e02020-08-11 08:04:14 -06005099 percpu_ref_put(&ctx->refs);
Jens Axboe807abcb2020-07-17 17:09:27 -06005100 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005101 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005102}
5103
5104static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5105 void *key)
5106{
5107 struct io_kiocb *req = wait->private;
5108 struct io_poll_iocb *poll = &req->apoll->poll;
5109
5110 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5111 key_to_poll(key));
5112
5113 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5114}
5115
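/* hash by user_data so cancellation can find the request quickly */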
5116static void io_poll_req_insert(struct io_kiocb *req)
5117{
5118 struct io_ring_ctx *ctx = req->ctx;
5119 struct hlist_head *list;
5120
5121 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5122 hlist_add_head(&req->hash_node, list);
5123}
5124
5125static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5126 struct io_poll_iocb *poll,
5127 struct io_poll_table *ipt, __poll_t mask,
5128 wait_queue_func_t wake_func)
5129 __acquires(&ctx->completion_lock)
5130{
5131 struct io_ring_ctx *ctx = req->ctx;
5132 bool cancel = false;
5133
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005134 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005135 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005136 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005137 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005138
5139 ipt->pt._key = mask;
5140 ipt->req = req;
5141 ipt->error = -EINVAL;
5142
Jens Axboed7718a92020-02-14 22:23:12 -07005143 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5144
5145 spin_lock_irq(&ctx->completion_lock);
5146 if (likely(poll->head)) {
5147 spin_lock(&poll->head->lock);
5148 if (unlikely(list_empty(&poll->wait.entry))) {
5149 if (ipt->error)
5150 cancel = true;
5151 ipt->error = 0;
5152 mask = 0;
5153 }
5154 if (mask || ipt->error)
5155 list_del_init(&poll->wait.entry);
5156 else if (cancel)
5157 WRITE_ONCE(poll->canceled, true);
5158 else if (!poll->done) /* actually waiting for an event */
5159 io_poll_req_insert(req);
5160 spin_unlock(&poll->head->lock);
5161 }
5162
5163 return mask;
5164}
5165
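/*
 * Arm an internal poll handler for a request that couldn't complete
 * nonblocking: rather than punting to io-wq, wait for the file to become
 * ready and then retry from task context. Returns true if poll was armed.
 */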
5166static bool io_arm_poll_handler(struct io_kiocb *req)
5167{
5168 const struct io_op_def *def = &io_op_defs[req->opcode];
5169 struct io_ring_ctx *ctx = req->ctx;
5170 struct async_poll *apoll;
5171 struct io_poll_table ipt;
5172 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005173 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005174
5175 if (!req->file || !file_can_poll(req->file))
5176 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005177 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005178 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005179 if (def->pollin)
5180 rw = READ;
5181 else if (def->pollout)
5182 rw = WRITE;
5183 else
5184 return false;
5185	/* if a nonblocking try isn't possible, don't bother arming poll */
5186 if (!io_file_supports_async(req->file, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005187 return false;
5188
5189 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5190 if (unlikely(!apoll))
5191 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005192 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005193
5194 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005195 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005196
Nathan Chancellor8755d972020-03-02 16:01:19 -07005197 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005198 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005199 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005200 if (def->pollout)
5201 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005202
5203 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5204 if ((req->opcode == IORING_OP_RECVMSG) &&
5205 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5206 mask &= ~POLLIN;
5207
Jens Axboed7718a92020-02-14 22:23:12 -07005208 mask |= POLLERR | POLLPRI;
5209
5210 ipt.pt._qproc = io_async_queue_proc;
5211
5212 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5213 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005214 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005215 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005216 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005217 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005218 kfree(apoll);
5219 return false;
5220 }
5221 spin_unlock_irq(&ctx->completion_lock);
5222 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5223 apoll->poll.events);
5224 return true;
5225}
5226
5227static bool __io_poll_remove_one(struct io_kiocb *req,
5228 struct io_poll_iocb *poll)
5229{
Jens Axboeb41e9852020-02-17 09:52:41 -07005230 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005231
5232 spin_lock(&poll->head->lock);
5233 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005234 if (!list_empty(&poll->wait.entry)) {
5235 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005236 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005237 }
5238 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005239 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005240 return do_complete;
5241}
5242
5243static bool io_poll_remove_one(struct io_kiocb *req)
5244{
5245 bool do_complete;
5246
Jens Axboed4e7cd32020-08-15 11:44:50 -07005247 io_poll_remove_double(req);
5248
Jens Axboed7718a92020-02-14 22:23:12 -07005249 if (req->opcode == IORING_OP_POLL_ADD) {
5250 do_complete = __io_poll_remove_one(req, &req->poll);
5251 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005252 struct async_poll *apoll = req->apoll;
5253
Jens Axboed7718a92020-02-14 22:23:12 -07005254 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005255 do_complete = __io_poll_remove_one(req, &apoll->poll);
5256 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005257 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005258 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005259 kfree(apoll);
5260 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005261 }
5262
Jens Axboeb41e9852020-02-17 09:52:41 -07005263 if (do_complete) {
5264 io_cqring_fill_event(req, -ECANCELED);
5265 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005266 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005267 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005268 }
5269
5270 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005271}
5272
Jens Axboe76e1b642020-09-26 15:05:03 -06005273/*
5274 * Returns true if we found and killed one or more poll requests
5275 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005276static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5277 struct files_struct *files)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005278{
Jens Axboe78076bb2019-12-04 19:56:40 -07005279 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005280 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005281 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005282
5283 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005284 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5285 struct hlist_head *list;
5286
5287 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005288 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00005289 if (io_match_task(req, tsk, files))
Jens Axboef3606e32020-09-22 08:18:24 -06005290 posted += io_poll_remove_one(req);
5291 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005292 }
5293 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005294
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005295 if (posted)
5296 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005297
5298 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005299}
5300
Jens Axboe47f46762019-11-09 17:43:02 -07005301static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5302{
Jens Axboe78076bb2019-12-04 19:56:40 -07005303 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005304 struct io_kiocb *req;
5305
Jens Axboe78076bb2019-12-04 19:56:40 -07005306 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5307 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005308 if (sqe_addr != req->user_data)
5309 continue;
5310 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005311 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005312 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005313 }
5314
5315 return -ENOENT;
5316}
5317
Jens Axboe3529d8c2019-12-19 18:24:38 -07005318static int io_poll_remove_prep(struct io_kiocb *req,
5319 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005320{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005321 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5322 return -EINVAL;
5323 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5324 sqe->poll_events)
5325 return -EINVAL;
5326
Pavel Begunkov018043b2020-10-27 23:17:18 +00005327 req->poll_remove.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005328 return 0;
5329}
5330
5331/*
5332 * Find a running poll command that matches one specified in sqe->addr,
5333 * and remove it if found.
5334 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005335static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005336{
5337 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0969e782019-12-17 18:40:57 -07005338 int ret;
5339
Jens Axboe221c5eb2019-01-17 09:41:58 -07005340 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov018043b2020-10-27 23:17:18 +00005341 ret = io_poll_cancel(ctx, req->poll_remove.addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005342 spin_unlock_irq(&ctx->completion_lock);
5343
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005344 if (ret < 0)
5345 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005346 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005347 return 0;
5348}
5349
Jens Axboe221c5eb2019-01-17 09:41:58 -07005350static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5351 void *key)
5352{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005353 struct io_kiocb *req = wait->private;
5354 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005355
Jens Axboed7718a92020-02-14 22:23:12 -07005356 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005357}
5358
Jens Axboe221c5eb2019-01-17 09:41:58 -07005359static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5360 struct poll_table_struct *p)
5361{
5362 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5363
Jens Axboee8c2bc12020-08-15 18:44:09 -07005364 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005365}
5366
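/*
 * Prep for IORING_OP_POLL_ADD, a one-shot poll on a file. From userspace
 * this is typically driven through liburing; a minimal illustrative
 * sketch (error handling omitted, assumes an initialized ring and an fd
 * to watch):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = tag;
 *	io_uring_submit(&ring);
 *	// the CQE posted for 'tag' carries the ready mask in cqe->res
 */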
Jens Axboe3529d8c2019-12-19 18:24:38 -07005367static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005368{
5369 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005370 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005371
5372 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5373 return -EINVAL;
5374 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5375 return -EINVAL;
5376
Jiufei Xue5769a352020-06-17 17:53:55 +08005377 events = READ_ONCE(sqe->poll32_events);
5378#ifdef __BIG_ENDIAN
5379 events = swahw32(events);
5380#endif
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005381 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5382 (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005383 return 0;
5384}
5385
Pavel Begunkov61e98202021-02-10 00:03:08 +00005386static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005387{
5388 struct io_poll_iocb *poll = &req->poll;
5389 struct io_ring_ctx *ctx = req->ctx;
5390 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005391 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005392
Jens Axboed7718a92020-02-14 22:23:12 -07005393 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005394
Jens Axboed7718a92020-02-14 22:23:12 -07005395 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5396 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005397
Jens Axboe8c838782019-03-12 15:48:16 -06005398	if (mask) { /* no async, we've stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005399 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005400 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005401 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005402 spin_unlock_irq(&ctx->completion_lock);
5403
Jens Axboe8c838782019-03-12 15:48:16 -06005404 if (mask) {
5405 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005406 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005407 }
Jens Axboe8c838782019-03-12 15:48:16 -06005408 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005409}
5410
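/*
 * hrtimer callback: take the timeout off the list and complete it
 * with -ETIME.
 */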
Jens Axboe5262f562019-09-17 12:26:57 -06005411static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5412{
Jens Axboead8a48a2019-11-15 08:49:11 -07005413 struct io_timeout_data *data = container_of(timer,
5414 struct io_timeout_data, timer);
5415 struct io_kiocb *req = data->req;
5416 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005417 unsigned long flags;
5418
Jens Axboe5262f562019-09-17 12:26:57 -06005419 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005420 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005421 atomic_set(&req->ctx->cq_timeouts,
5422 atomic_read(&req->ctx->cq_timeouts) + 1);
5423
Jens Axboe78e19bb2019-11-06 15:21:34 -07005424 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005425 io_commit_cqring(ctx);
5426 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5427
5428 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005429 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005430 io_put_req(req);
5431 return HRTIMER_NORESTART;
5432}
5433
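/*
 * Find a pending timeout by user_data and try to stop its timer; on
 * success the request is unlinked and handed back for cancel or update.
 */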
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005434static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5435 __u64 user_data)
Jens Axboe47f46762019-11-09 17:43:02 -07005436{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005437 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005438 struct io_kiocb *req;
5439 int ret = -ENOENT;
5440
5441 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5442 if (user_data == req->user_data) {
5443 ret = 0;
5444 break;
5445 }
5446 }
5447
5448 if (ret == -ENOENT)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005449 return ERR_PTR(ret);
Jens Axboef254ac02020-08-12 17:33:30 -06005450
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005451 io = req->async_data;
5452 ret = hrtimer_try_to_cancel(&io->timer);
5453 if (ret == -1)
5454 return ERR_PTR(-EALREADY);
5455 list_del_init(&req->timeout.list);
5456 return req;
5457}
5458
5459static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5460{
5461 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5462
5463 if (IS_ERR(req))
5464 return PTR_ERR(req);
5465
5466 req_set_fail_links(req);
5467 io_cqring_fill_event(req, -ECANCELED);
5468 io_put_req_deferred(req, 1);
5469 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005470}
5471
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005472static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5473 struct timespec64 *ts, enum hrtimer_mode mode)
5474{
5475 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5476 struct io_timeout_data *data;
5477
5478 if (IS_ERR(req))
5479 return PTR_ERR(req);
5480
5481 req->timeout.off = 0; /* noseq */
5482 data = req->async_data;
5483 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5484 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5485 data->timer.function = io_timeout_fn;
5486 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5487 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005488}
5489
Jens Axboe3529d8c2019-12-19 18:24:38 -07005490static int io_timeout_remove_prep(struct io_kiocb *req,
5491 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005492{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005493 struct io_timeout_rem *tr = &req->timeout_rem;
5494
Jens Axboeb29472e2019-12-17 18:50:29 -07005495 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5496 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005497 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5498 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005499 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005500 return -EINVAL;
5501
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005502 tr->addr = READ_ONCE(sqe->addr);
5503 tr->flags = READ_ONCE(sqe->timeout_flags);
5504 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5505 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5506 return -EINVAL;
5507 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5508 return -EFAULT;
5509 } else if (tr->flags) {
5510 /* timeout removal doesn't support flags */
5511 return -EINVAL;
5512 }
5513
Jens Axboeb29472e2019-12-17 18:50:29 -07005514 return 0;
5515}
5516
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005517static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5518{
5519 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5520 : HRTIMER_MODE_REL;
5521}
5522
Jens Axboe11365042019-10-16 09:08:32 -06005523/*
5524 * Remove or update an existing timeout command
5525 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005526static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005527{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005528 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005529 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005530 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005531
Jens Axboe11365042019-10-16 09:08:32 -06005532 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005533 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005534 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005535 else
5536 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5537 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005538
Jens Axboe47f46762019-11-09 17:43:02 -07005539 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005540 io_commit_cqring(ctx);
5541 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005542 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005543 if (ret < 0)
5544 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005545 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005546 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005547}
5548
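/*
 * Prep for IORING_OP_TIMEOUT and IORING_OP_LINK_TIMEOUT: validate the
 * SQE, allocate the async timer state, and copy in the user timespec.
 * From liburing this is reached via helpers such as
 * io_uring_prep_timeout(sqe, &ts, 0, 0) with a struct __kernel_timespec
 * (illustrative; see the liburing man pages for the full flag set).
 */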
Jens Axboe3529d8c2019-12-19 18:24:38 -07005549static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005550 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005551{
Jens Axboead8a48a2019-11-15 08:49:11 -07005552 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005553 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005554 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005555
Jens Axboead8a48a2019-11-15 08:49:11 -07005556 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005557 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005558 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005559 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005560 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005561 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005562 flags = READ_ONCE(sqe->timeout_flags);
5563 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005564 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005565
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005566 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005567
Jens Axboee8c2bc12020-08-15 18:44:09 -07005568 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005569 return -ENOMEM;
5570
Jens Axboee8c2bc12020-08-15 18:44:09 -07005571 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005572 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005573
5574 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005575 return -EFAULT;
5576
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005577 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005578 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkov2482b582021-03-25 18:32:44 +00005579 if (is_timeout_link)
5580 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005581 return 0;
5582}
5583
Pavel Begunkov61e98202021-02-10 00:03:08 +00005584static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005585{
Jens Axboead8a48a2019-11-15 08:49:11 -07005586 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005587 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005588 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005589 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005590
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005591 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005592
Jens Axboe5262f562019-09-17 12:26:57 -06005593 /*
5594	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005595	 * timeout event to be satisfied. If it isn't set, then this is
5596	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005597 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005598 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005599 entry = ctx->timeout_list.prev;
5600 goto add;
5601 }
Jens Axboe5262f562019-09-17 12:26:57 -06005602
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005603 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5604 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005605
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005606	/*
	 * Update the last seq here in case io_flush_timeouts() hasn't.
5607 * This is safe because ->completion_lock is held, and submissions
5608 * and completions are never mixed in the same ->completion_lock section.
5609 */
5610 ctx->cq_last_tm_flush = tail;
5611
Jens Axboe5262f562019-09-17 12:26:57 -06005612 /*
5613 * Insertion sort, ensuring the first entry in the list is always
5614 * the one we need first.
5615 */
Jens Axboe5262f562019-09-17 12:26:57 -06005616 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005617 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5618 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005619
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005620 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005621 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005622 /* nxt.seq is behind @tail, otherwise would've been completed */
5623 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005624 break;
5625 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005626add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005627 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005628 data->timer.function = io_timeout_fn;
5629 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005630 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005631 return 0;
5632}
5633
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005634struct io_cancel_data {
5635 struct io_ring_ctx *ctx;
5636 u64 user_data;
5637};
5638
Jens Axboe62755e32019-10-28 21:49:21 -06005639static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005640{
Jens Axboe62755e32019-10-28 21:49:21 -06005641 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005642 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005643
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005644 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005645}
5646
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005647static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5648 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005649{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005650 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005651 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005652 int ret = 0;
5653
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005654 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005655 return -ENOENT;
5656
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005657 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005658 switch (cancel_ret) {
5659 case IO_WQ_CANCEL_OK:
5660 ret = 0;
5661 break;
5662 case IO_WQ_CANCEL_RUNNING:
5663 ret = -EALREADY;
5664 break;
5665 case IO_WQ_CANCEL_NOTFOUND:
5666 ret = -ENOENT;
5667 break;
5668 }
5669
Jens Axboee977d6d2019-11-05 12:39:45 -07005670 return ret;
5671}
5672
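/*
 * Best-effort cancel: try the task's io-wq first, then pending timeouts,
 * then poll requests, and post the result (or -ENOENT) as a CQE.
 */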
Jens Axboe47f46762019-11-09 17:43:02 -07005673static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5674 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005675 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005676{
5677 unsigned long flags;
5678 int ret;
5679
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005680 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005681 if (ret != -ENOENT) {
5682 spin_lock_irqsave(&ctx->completion_lock, flags);
5683 goto done;
5684 }
5685
5686 spin_lock_irqsave(&ctx->completion_lock, flags);
5687 ret = io_timeout_cancel(ctx, sqe_addr);
5688 if (ret != -ENOENT)
5689 goto done;
5690 ret = io_poll_cancel(ctx, sqe_addr);
5691done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005692 if (!ret)
5693 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005694 io_cqring_fill_event(req, ret);
5695 io_commit_cqring(ctx);
5696 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5697 io_cqring_ev_posted(ctx);
5698
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005699 if (ret < 0)
5700 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005701 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005702}
5703
Jens Axboe3529d8c2019-12-19 18:24:38 -07005704static int io_async_cancel_prep(struct io_kiocb *req,
5705 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005706{
Jens Axboefbf23842019-12-17 18:45:56 -07005707 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005708 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005709 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5710 return -EINVAL;
5711 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005712 return -EINVAL;
5713
Jens Axboefbf23842019-12-17 18:45:56 -07005714 req->cancel.addr = READ_ONCE(sqe->addr);
5715 return 0;
5716}
5717
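/*
 * IORING_OP_ASYNC_CANCEL: look for a request matching the given
 * user_data in this task's io-wq, then timeouts, then poll; as a slow
 * path, scan the io-wq of every task attached to the ring. The CQE
 * result reports the outcome.
 */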
Pavel Begunkov61e98202021-02-10 00:03:08 +00005718static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005719{
5720 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005721 u64 sqe_addr = req->cancel.addr;
5722 struct io_tctx_node *node;
5723 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005724
Pavel Begunkov58f99372021-03-12 16:25:55 +00005725 /* tasks should wait for their io-wq threads, so safe w/o sync */
5726 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5727 spin_lock_irq(&ctx->completion_lock);
5728 if (ret != -ENOENT)
5729 goto done;
5730 ret = io_timeout_cancel(ctx, sqe_addr);
5731 if (ret != -ENOENT)
5732 goto done;
5733 ret = io_poll_cancel(ctx, sqe_addr);
5734 if (ret != -ENOENT)
5735 goto done;
5736 spin_unlock_irq(&ctx->completion_lock);
5737
5738	/* slow path, try all io-wqs */
5739 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5740 ret = -ENOENT;
5741 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5742 struct io_uring_task *tctx = node->task->io_uring;
5743
5744 if (!tctx || !tctx->io_wq)
5745 continue;
5746 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5747 if (ret != -ENOENT)
5748 break;
5749 }
5750 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5751
5752 spin_lock_irq(&ctx->completion_lock);
5753done:
5754 io_cqring_fill_event(req, ret);
5755 io_commit_cqring(ctx);
5756 spin_unlock_irq(&ctx->completion_lock);
5757 io_cqring_ev_posted(ctx);
5758
5759 if (ret < 0)
5760 req_set_fail_links(req);
5761 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005762 return 0;
5763}
5764
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005765static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005766 const struct io_uring_sqe *sqe)
5767{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005768 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5769 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005770 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5771 return -EINVAL;
5772 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005773 return -EINVAL;
5774
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005775 req->rsrc_update.offset = READ_ONCE(sqe->off);
5776 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5777 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005778 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005779 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005780 return 0;
5781}
5782
Pavel Begunkov889fca72021-02-10 00:03:09 +00005783static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005784{
5785 struct io_ring_ctx *ctx = req->ctx;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005786 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005787 int ret;
5788
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005789 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005790 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005791
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005792 up.offset = req->rsrc_update.offset;
5793 up.data = req->rsrc_update.arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005794
5795 mutex_lock(&ctx->uring_lock);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005796 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005797 mutex_unlock(&ctx->uring_lock);
5798
5799 if (ret < 0)
5800 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005801 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005802 return 0;
5803}
5804
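/* route SQE parsing to the per-opcode prep handler */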
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005805static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005806{
Jens Axboed625c6e2019-12-17 19:53:05 -07005807 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005808 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005809 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005810 case IORING_OP_READV:
5811 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005812 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005813 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005814 case IORING_OP_WRITEV:
5815 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005816 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005817 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005818 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005819 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005820 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005821 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005822 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005823 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005824 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005825 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005826 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005827 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005828 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005829 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005830 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005831 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005832 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005833 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005834 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005835 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005836 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005837 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005838 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005839 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005840 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005841 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005842 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005843 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005844 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005845 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005846 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005847 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005848 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005849 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005850 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005851 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005852 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005853 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005854 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005855 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005856 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005857 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005858 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005859 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005860 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005861 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005862 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005863 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005864 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005865 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005866 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005867 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005868 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005869 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005870 case IORING_OP_SHUTDOWN:
5871 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005872 case IORING_OP_RENAMEAT:
5873 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005874 case IORING_OP_UNLINKAT:
5875 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005876 }
5877
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005878 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5879 req->opcode);
5880	return -EINVAL;
5881}
5882
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005883static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005884{
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005885 switch (req->opcode) {
5886 case IORING_OP_READV:
5887 case IORING_OP_READ_FIXED:
5888 case IORING_OP_READ:
5889 return io_rw_prep_async(req, READ);
5890 case IORING_OP_WRITEV:
5891 case IORING_OP_WRITE_FIXED:
5892 case IORING_OP_WRITE:
5893 return io_rw_prep_async(req, WRITE);
5894 case IORING_OP_SENDMSG:
5895 case IORING_OP_SEND:
5896 return io_sendmsg_prep_async(req);
5897 case IORING_OP_RECVMSG:
5898 case IORING_OP_RECV:
5899 return io_recvmsg_prep_async(req);
5900 case IORING_OP_CONNECT:
5901 return io_connect_prep_async(req);
5902 }
5903 return 0;
5904}
5905
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005906static int io_req_defer_prep(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005907{
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005908 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005909 return 0;
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005910	/* some opcodes init it during the initial prep */
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005911 if (req->async_data)
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005912 return 0;
5913 if (__io_alloc_async_data(req))
Jens Axboeb76da702019-11-20 13:05:32 -07005914 return -EAGAIN;
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005915 return io_req_prep_async(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005916}
5917
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005918static u32 io_get_sequence(struct io_kiocb *req)
5919{
5920 struct io_kiocb *pos;
5921 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005922 u32 total_submitted, nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005923
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005924 io_for_each_link(pos, req)
5925 nr_reqs++;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005926
5927 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5928 return total_submitted - nr_reqs;
5929}
5930
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005931static int io_req_defer(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005932{
5933 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005934 struct io_defer_entry *de;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005935 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005936 u32 seq;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005937
5938	/* Still need defer if there are pending reqs in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005939 if (likely(list_empty_careful(&ctx->defer_list) &&
5940 !(req->flags & REQ_F_IO_DRAIN)))
5941 return 0;
5942
5943 seq = io_get_sequence(req);
5944 /* Still a chance to pass the sequence check */
5945 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboe2b188cc2019-01-07 10:46:33 -07005946 return 0;
5947
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005948 ret = io_req_defer_prep(req);
5949 if (ret)
5950 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03005951 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005952 de = kmalloc(sizeof(*de), GFP_KERNEL);
5953 if (!de)
5954 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07005955
5956 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005957 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07005958 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005959 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03005960 io_queue_async_work(req);
5961 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07005962 }
5963
5964 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005965 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005966 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005967 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07005968 spin_unlock_irq(&ctx->completion_lock);
5969 return -EIOCBQUEUED;
5970}
Jens Axboeedafcce2019-01-09 09:16:05 -07005971
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03005972static void __io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005973{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005974 if (req->flags & REQ_F_BUFFER_SELECTED) {
5975 switch (req->opcode) {
5976 case IORING_OP_READV:
5977 case IORING_OP_READ_FIXED:
5978 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005979 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005980 break;
5981 case IORING_OP_RECVMSG:
5982 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07005983 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005984 break;
5985 }
5986 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005987 }
5988
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005989 if (req->flags & REQ_F_NEED_CLEANUP) {
5990 switch (req->opcode) {
5991 case IORING_OP_READV:
5992 case IORING_OP_READ_FIXED:
5993 case IORING_OP_READ:
5994 case IORING_OP_WRITEV:
5995 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005996 case IORING_OP_WRITE: {
5997 struct io_async_rw *io = req->async_data;
5998 if (io->free_iovec)
5999 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006000 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006001 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006002 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006003 case IORING_OP_SENDMSG: {
6004 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006005
6006 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006007 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006008 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006009 case IORING_OP_SPLICE:
6010 case IORING_OP_TEE:
6011 io_put_file(req, req->splice.file_in,
6012 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
6013 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006014 case IORING_OP_OPENAT:
6015 case IORING_OP_OPENAT2:
6016 if (req->open.filename)
6017 putname(req->open.filename);
6018 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006019 case IORING_OP_RENAMEAT:
6020 putname(req->rename.oldpath);
6021 putname(req->rename.newpath);
6022 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006023 case IORING_OP_UNLINKAT:
6024 putname(req->unlink.filename);
6025 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006026 }
6027 req->flags &= ~REQ_F_NEED_CLEANUP;
6028 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006029}
6030
Pavel Begunkov889fca72021-02-10 00:03:09 +00006031static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006032{
Jens Axboeedafcce2019-01-09 09:16:05 -07006033 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006034 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006035 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006036
Jens Axboe003e8dc2021-03-06 09:22:27 -07006037 if (req->work.creds && req->work.creds != current_cred())
6038 creds = override_creds(req->work.creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006039
Jens Axboed625c6e2019-12-17 19:53:05 -07006040 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006041 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006042 ret = io_nop(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006043 break;
6044 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006045 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006046 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006047 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006048 break;
6049 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006050 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006051 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006052 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006053 break;
6054 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006055 ret = io_fsync(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006056 break;
6057 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006058 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006059 break;
6060 case IORING_OP_POLL_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006061 ret = io_poll_remove(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006062 break;
6063 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006064 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006065 break;
6066 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006067 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006068 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006069 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006070 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006071 break;
6072 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006073 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006074 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006075 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006076 ret = io_recv(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006077 break;
6078 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006079 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006080 break;
6081 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006082 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006083 break;
6084 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006085 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006086 break;
6087 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006088 ret = io_connect(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006089 break;
6090 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006091 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006092 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006093 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006094 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006095 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006096 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006097 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006098 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006099 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006100 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006101 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006102 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006103 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006104 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006105 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006106 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006107 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006108 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006109 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006110 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006111 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006112 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006113 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006114 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006115 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006116 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006117 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006118 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006119 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006120 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006121 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006122 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006123 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006124 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006125 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006126 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006127 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006128 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006129 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006130 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006131 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006132 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006133 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006134 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006135 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006136 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006137 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006138 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006139 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006140 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006141 default:
6142 ret = -EINVAL;
6143 break;
Jens Axboe31b51512019-01-18 22:56:34 -07006144 }
6145
Jens Axboe5730b272021-02-27 15:57:30 -07006146 if (creds)
6147 revert_creds(creds);
6148
Jens Axboe2b188cc2019-01-07 10:46:33 -07006149 if (ret)
6150 return ret;
6151
Jens Axboeb5325762020-05-19 21:20:27 -06006152 /* If the op doesn't have a file, we're not polling for it */
6153 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006154 const bool in_async = io_wq_current_is_worker();
6155
Jens Axboe11ba8202020-01-15 21:51:17 -07006156 /* workqueue context doesn't hold uring_lock, grab it now */
6157 if (in_async)
6158 mutex_lock(&ctx->uring_lock);
6159
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08006160 io_iopoll_req_issued(req, in_async);
Jens Axboe11ba8202020-01-15 21:51:17 -07006161
6162 if (in_async)
6163 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006164 }
6165
6166 return 0;
6167}
6168
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006169static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006170{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006171 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006172 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006173 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006174
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006175 timeout = io_prep_linked_timeout(req);
6176 if (timeout)
6177 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006178
Jens Axboe4014d942021-01-19 15:53:54 -07006179 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006180 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006181
Jens Axboe561fb042019-10-24 07:25:42 -06006182 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006183 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006184 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006185 /*
6186 * We can get EAGAIN for polled IO even though we're
6187 * forcing a sync submission from here, since we can't
6188 * wait for request slots on the block side.
6189 */
6190 if (ret != -EAGAIN)
6191 break;
6192 cond_resched();
6193 } while (1);
6194 }
Jens Axboe31b51512019-01-18 22:56:34 -07006195
Pavel Begunkova3df76982021-02-18 22:32:52 +00006196 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006197 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006198 /* io-wq is going to take one down */
6199 refcount_inc(&req->refs);
6200 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006201 }
Jens Axboe31b51512019-01-18 22:56:34 -07006202}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006203
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006204static inline struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
6205 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006206{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006207 struct fixed_rsrc_table *table;
Jens Axboe65e19f52019-10-26 07:20:21 -06006208
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006209 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
6210 return &table->files[i & IORING_FILE_TABLE_MASK];
6211}
6212
6213static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6214 int index)
6215{
6216 return *io_fixed_file_slot(ctx->file_data, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006217}
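
/*
 * Editor's note: the fixed-file set is a two-level table. Assuming
 * IORING_FILE_TABLE_SHIFT is 9 at this point in the tree (512 entries per
 * table, IORING_FILE_TABLE_MASK == 511), a worked example: i == 1300
 * resolves to file_data->table[1300 >> 9].files[1300 & 511], i.e.
 * table[2].files[276].
 */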
6218
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006219static struct file *io_file_get(struct io_submit_state *state,
6220 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006221{
6222 struct io_ring_ctx *ctx = req->ctx;
6223 struct file *file;
6224
6225 if (fixed) {
Pavel Begunkov479f5172020-10-10 18:34:07 +01006226 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006227 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006228 fd = array_index_nospec(fd, ctx->nr_user_files);
6229 file = io_file_from_index(ctx, fd);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00006230 io_set_resource_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006231 } else {
6232 trace_io_uring_file_get(ctx, fd);
6233 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006234 }
6235
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00006236 if (file && unlikely(file->f_op == &io_uring_fops))
6237 io_req_track_inflight(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006238 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006239}
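
/*
 * Editor's sketch of how the fixed branch above is driven from userspace
 * with liburing (illustrative; paths and indices are placeholders):
 *
 *	int fds[2] = { open("a", O_RDONLY), open("b", O_RDONLY) };
 *
 *	io_uring_register_files(&ring, fds, 2);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, 1, buf, len, 0);  // "fd" is an index into fds[]
 *	sqe->flags |= IOSQE_FIXED_FILE;           // take the fixed lookup path
 *
 * Without IOSQE_FIXED_FILE, sqe->fd is an ordinary descriptor and
 * io_file_get() resolves it through __io_file_get() instead.
 */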
6240
Jens Axboe2665abf2019-11-05 12:40:47 -07006241static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6242{
Jens Axboead8a48a2019-11-15 08:49:11 -07006243 struct io_timeout_data *data = container_of(timer,
6244 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006245 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006246 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006247 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006248
6249 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006250 prev = req->timeout.head;
6251 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006252
6253 /*
 6254	 * We don't expect the list to be empty; that will only happen if we
6255 * race with the completion of the linked work.
6256 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006257 if (prev && refcount_inc_not_zero(&prev->refs))
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006258 io_remove_next_linked(prev);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006259 else
6260 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006261 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6262
6263 if (prev) {
Pavel Begunkov014db002020-03-03 21:33:12 +03006264 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006265 io_put_req_deferred(prev, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006266 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006267 io_req_complete_post(req, -ETIME, 0);
6268 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07006269 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006270 return HRTIMER_NORESTART;
6271}
6272
Jens Axboe7271ef32020-08-10 09:55:22 -06006273static void __io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006274{
Jens Axboe76a46e02019-11-10 23:34:16 -07006275 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006276 * If the back reference is NULL, then our linked request finished
 6277	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006278 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006279 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006280 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006281
Jens Axboead8a48a2019-11-15 08:49:11 -07006282 data->timer.function = io_link_timeout_fn;
6283 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6284 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006285 }
Jens Axboe7271ef32020-08-10 09:55:22 -06006286}
6287
6288static void io_queue_linked_timeout(struct io_kiocb *req)
6289{
6290 struct io_ring_ctx *ctx = req->ctx;
6291
6292 spin_lock_irq(&ctx->completion_lock);
6293 __io_queue_linked_timeout(req);
Jens Axboe76a46e02019-11-10 23:34:16 -07006294 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006295
Jens Axboe2665abf2019-11-05 12:40:47 -07006296 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006297 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006298}
6299
Jens Axboead8a48a2019-11-15 08:49:11 -07006300static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006301{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006302 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006303
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006304 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6305 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006306 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006307
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006308 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006309 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006310 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006311 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006312}
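
/*
 * Editor's sketch of arming a linked timeout from userspace with liburing
 * (illustrative; the one-second timeout is arbitrary):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *
 *	io_uring_submit(&ring);
 *
 * If the read is still pending when the timer fires, io_link_timeout_fn()
 * above cancels it: the timeout CQE carries -ETIME and the cancelled read
 * typically completes with -ECANCELED.
 */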
6313
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006314static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006315{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006316 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006317 int ret;
6318
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006319 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006320
6321 /*
6322 * We async punt it if the file wasn't marked NOWAIT, or if the file
6323 * doesn't support non-blocking read/write attempts
6324 */
Pavel Begunkov24c74672020-06-21 13:09:51 +03006325 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006326 if (!io_arm_poll_handler(req)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006327 /*
6328 * Queued up for async execution, worker will release
6329 * submit reference when the iocb is actually submitted.
6330 */
6331 io_queue_async_work(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006332 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006333 } else if (likely(!ret)) {
6334 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006335 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006336 struct io_ring_ctx *ctx = req->ctx;
6337 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006338
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006339 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006340 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006341 io_submit_flush_completions(cs, ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006342 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006343 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006344 }
6345 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006346 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006347 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006348 if (linked_timeout)
6349 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006350}
6351
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006352static void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006353{
6354 int ret;
6355
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006356 ret = io_req_defer(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006357 if (ret) {
6358 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006359fail_req:
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006360 io_req_complete_failed(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006361 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006362 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006363 ret = io_req_defer_prep(req);
6364 if (unlikely(ret))
6365 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07006366 io_queue_async_work(req);
6367 } else {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006368 __io_queue_sqe(req);
Jens Axboece35a472019-12-17 08:04:44 -07006369 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006370}
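
/*
 * Editor's note: the REQ_F_FORCE_ASYNC branch above is what userspace
 * requests with IOSQE_ASYNC, e.g. (liburing, illustrative):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_ASYNC;	// skip the inline attempt, go to io-wq
 */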
6371
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006372/*
6373 * Check SQE restrictions (opcode and flags).
6374 *
6375 * Returns 'true' if SQE is allowed, 'false' otherwise.
6376 */
6377static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6378 struct io_kiocb *req,
6379 unsigned int sqe_flags)
6380{
6381 if (!ctx->restricted)
6382 return true;
6383
6384 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6385 return false;
6386
6387 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6388 ctx->restrictions.sqe_flags_required)
6389 return false;
6390
6391 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6392 ctx->restrictions.sqe_flags_required))
6393 return false;
6394
6395 return true;
6396}
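
/*
 * Editor's sketch of registering restrictions from userspace with the raw
 * io_uring_register(2) syscall (illustrative, error handling omitted; the
 * ring must have been created with IORING_SETUP_R_DISABLED):
 *
 *	struct io_uring_restriction res[2] = {};
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;	// allow only readv
 *	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
 *	res[1].sqe_flags = IOSQE_IO_LINK;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * Any SQE that violates the registered set fails io_check_restriction()
 * above, and io_init_req() returns -EACCES.
 */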
6397
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006398static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006399 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006400{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006401 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006402 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006403 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006404
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006405 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006406 /* same numerical values with corresponding REQ_F_*, safe to copy */
6407 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006408 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006409 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006410 req->file = NULL;
6411 req->ctx = ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006412 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006413 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006414 /* one is dropped after submission, the other at completion */
6415 refcount_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006416 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006417 req->result = 0;
Jens Axboe93e68e02021-03-09 07:02:21 -07006418 req->work.list.next = NULL;
6419 req->work.creds = NULL;
6420 req->work.flags = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006421
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006422 /* enforce forwards compatibility on users */
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006423 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
6424 req->flags = 0;
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006425 return -EINVAL;
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006426 }
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006427
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006428 if (unlikely(req->opcode >= IORING_OP_LAST))
6429 return -EINVAL;
6430
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006431 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6432 return -EACCES;
6433
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006434 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6435 !io_op_defs[req->opcode].buffer_select)
6436 return -EOPNOTSUPP;
6437
Jens Axboe003e8dc2021-03-06 09:22:27 -07006438 personality = READ_ONCE(sqe->personality);
6439 if (personality) {
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00006440 req->work.creds = xa_load(&ctx->personalities, personality);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006441 if (!req->work.creds)
6442 return -EINVAL;
6443 get_cred(req->work.creds);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006444 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006445 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006446
Jens Axboe27926b62020-10-28 09:33:23 -06006447 /*
6448 * Plug now if we have more than 1 IO left after this, and the target
6449 * is potentially a read/write to block based storage.
6450 */
6451 if (!state->plug_started && state->ios_left > 1 &&
6452 io_op_defs[req->opcode].plug) {
6453 blk_start_plug(&state->plug);
6454 state->plug_started = true;
6455 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006456
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006457 if (io_op_defs[req->opcode].needs_file) {
6458 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006459
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006460 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006461 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006462 ret = -EBADF;
6463 }
6464
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006465 state->ios_left--;
6466 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006467}
6468
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006469static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006470 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006471{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006472 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006473 int ret;
6474
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006475 ret = io_init_req(ctx, req, sqe);
6476 if (unlikely(ret)) {
6477fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006478 if (link->head) {
6479 /* fail even hard links since we don't submit */
Pavel Begunkovcf109602021-02-18 18:29:43 +00006480 link->head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006481 io_req_complete_failed(link->head, -ECANCELED);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006482 link->head = NULL;
6483 }
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006484 io_req_complete_failed(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006485 return ret;
6486 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006487 ret = io_req_prep(req, sqe);
6488 if (unlikely(ret))
6489 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006490
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006491 /* don't need @sqe from now on */
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006492 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6493 true, ctx->flags & IORING_SETUP_SQPOLL);
6494
Jens Axboe6c271ce2019-01-10 11:22:30 -07006495 /*
6496 * If we already have a head request, queue this one for async
6497 * submittal once the head completes. If we don't have a head but
6498 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6499 * submitted sync once the chain is complete. If none of those
6500 * conditions are true (normal request), then just queue it.
6501 */
6502 if (link->head) {
6503 struct io_kiocb *head = link->head;
6504
6505 /*
 6506		 * Given the sequential execution of a link, draining both sides
 6507		 * of the link also fulfills IOSQE_IO_DRAIN semantics for all
6508 * requests in the link. So, it drains the head and the
6509 * next after the link request. The last one is done via
6510 * drain_next flag to persist the effect across calls.
6511 */
6512 if (req->flags & REQ_F_IO_DRAIN) {
6513 head->flags |= REQ_F_IO_DRAIN;
6514 ctx->drain_next = 1;
6515 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006516 ret = io_req_defer_prep(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006517 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006518 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006519 trace_io_uring_link(ctx, req, head);
6520 link->last->link = req;
6521 link->last = req;
6522
6523 /* last request of a link, enqueue the link */
6524 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006525 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006526 link->head = NULL;
6527 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006528 } else {
6529 if (unlikely(ctx->drain_next)) {
6530 req->flags |= REQ_F_IO_DRAIN;
6531 ctx->drain_next = 0;
6532 }
6533 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006534 link->head = req;
6535 link->last = req;
6536 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006537 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006538 }
6539 }
6540
6541 return 0;
6542}
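
/*
 * Editor's sketch of the link handling above from userspace (liburing,
 * illustrative): a write chained to an fsync. The write becomes
 * link->head; the flag-less fsync is the last member, so it ends the
 * chain, the head is queued, and the fsync runs only after the write
 * completes.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;	// more links follow
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);	// no link flag: ends the chain
 *
 *	io_uring_submit(&ring);
 */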
6543
6544/*
 6545 * Batched submission is done; ensure local IO is flushed out.
6546 */
6547static void io_submit_state_end(struct io_submit_state *state,
6548 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006549{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006550 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006551 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006552 if (state->comp.nr)
Jens Axboe9e645e112019-05-10 16:07:28 -06006553 io_submit_flush_completions(&state->comp, ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006554 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006555 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006556 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006557}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006558
Jens Axboe9e645e112019-05-10 16:07:28 -06006559/*
6560 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006561 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006562static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006563 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006564{
6565 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006566 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006567 /* set only head, no need to init link_last in advance */
6568 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006569}
6570
Jens Axboe193155c2020-02-22 23:22:19 -07006571static void io_commit_sqring(struct io_ring_ctx *ctx)
6572{
Jens Axboe75c6a032020-01-28 10:15:23 -07006573 struct io_rings *rings = ctx->rings;
6574
6575 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006576 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006577 * since once we write the new head, the application could
6578 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006579 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006580 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006581}
6582
Jens Axboe9e645e112019-05-10 16:07:28 -06006583/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006584 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006585 * that is mapped by userspace. This means that care needs to be taken to
6586 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006587 * being a good citizen. If members of the sqe are validated and then later
6588 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006589 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006590 */
6591static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006592{
6593 u32 *sq_array = ctx->sq_array;
6594 unsigned head;
6595
6596 /*
6597 * The cached sq head (or cq tail) serves two purposes:
6598 *
6599 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006600 * head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006601 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006602 * though the application is the one updating it.
6603 */
6604 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
6605 if (likely(head < ctx->sq_entries))
6606 return &ctx->sq_sqes[head];
6607
6608 /* drop invalid entries */
Pavel Begunkov711be032020-01-17 03:57:59 +03006609 ctx->cached_sq_dropped++;
6610 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6611 return NULL;
6612}
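
/*
 * Editor's sketch of the producer side that io_get_sqe() consumes, using
 * C11 atomics in place of kernel barrier macros (illustrative; sqes,
 * sq_array, ktail and mask come from the ring mmap, and slots map 1:1 to
 * SQE indices for simplicity):
 *
 *	unsigned tail = *ktail;		// only userspace advances the tail
 *
 *	sqes[tail & mask] = filled_sqe;
 *	sq_array[tail & mask] = tail & mask;
 *	atomic_store_explicit((_Atomic unsigned *)ktail, tail + 1,
 *			      memory_order_release);
 *
 * The release store makes the SQE contents visible before the new tail
 * is, matching the acquire-style tail read on the kernel side.
 */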
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006613
Jens Axboe0f212202020-09-13 13:09:39 -06006614static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006615{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006616 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006617
Jens Axboec4a2ed72019-11-21 21:01:26 -07006618 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006619 if (test_bit(0, &ctx->sq_check_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006620 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006621 return -EBUSY;
6622 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006623
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006624 /* make sure SQ entry isn't read before tail */
6625 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006626
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006627 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6628 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006629
Jens Axboed8a6df12020-10-15 16:24:45 -06006630 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006631 refcount_add(nr, &current->usage);
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006632 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006633
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006634 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006635 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006636 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006637
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006638 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006639 if (unlikely(!req)) {
6640 if (!submitted)
6641 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006642 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006643 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006644 sqe = io_get_sqe(ctx);
6645 if (unlikely(!sqe)) {
6646 kmem_cache_free(req_cachep, req);
6647 break;
6648 }
Jens Axboed3656342019-12-18 09:50:26 -07006649 /* will complete beyond this point, count as submitted */
6650 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006651 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006652 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006653 }
6654
Pavel Begunkov9466f432020-01-25 22:34:01 +03006655 if (unlikely(submitted != nr)) {
6656 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006657 struct io_uring_task *tctx = current->io_uring;
6658 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006659
Jens Axboed8a6df12020-10-15 16:24:45 -06006660 percpu_ref_put_many(&ctx->refs, unused);
6661 percpu_counter_sub(&tctx->inflight, unused);
6662 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006663 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006664
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006665 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006666 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6667 io_commit_sqring(ctx);
6668
Jens Axboe6c271ce2019-01-10 11:22:30 -07006669 return submitted;
6670}
6671
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006672static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6673{
6674 /* Tell userspace we may need a wakeup call */
6675 spin_lock_irq(&ctx->completion_lock);
6676 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6677 spin_unlock_irq(&ctx->completion_lock);
6678}
6679
6680static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6681{
6682 spin_lock_irq(&ctx->completion_lock);
6683 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6684 spin_unlock_irq(&ctx->completion_lock);
6685}
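
/*
 * Editor's sketch of the userspace counterpart to the two helpers above,
 * for a ring created with IORING_SETUP_SQPOLL (raw syscall, illustrative;
 * sq_flags points into the mmap'ed SQ ring):
 *
 *	// after publishing new SQEs:
 *	if (atomic_load((_Atomic unsigned *)sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, n_sqes, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *
 * If the flag is clear, the SQ thread is still polling and submission
 * needs no syscall at all.
 */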
6686
Xiaoguang Wang08369242020-11-03 14:15:59 +08006687static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006688{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006689 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006690 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006691
Jens Axboec8d1ba52020-09-14 11:07:26 -06006692 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006693 /* if we're handling multiple rings, cap submit size for fairness */
6694 if (cap_entries && to_submit > 8)
6695 to_submit = 8;
6696
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006697 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6698 unsigned nr_events = 0;
6699
Xiaoguang Wang08369242020-11-03 14:15:59 +08006700 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006701 if (!list_empty(&ctx->iopoll_list))
6702 io_do_iopoll(ctx, &nr_events, 0);
6703
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006704 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6705 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006706 ret = io_submit_sqes(ctx, to_submit);
6707 mutex_unlock(&ctx->uring_lock);
6708 }
Jens Axboe90554202020-09-03 12:12:41 -06006709
6710 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6711 wake_up(&ctx->sqo_sq_wait);
6712
Xiaoguang Wang08369242020-11-03 14:15:59 +08006713 return ret;
6714}
6715
6716static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6717{
6718 struct io_ring_ctx *ctx;
6719 unsigned sq_thread_idle = 0;
6720
6721 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6722 if (sq_thread_idle < ctx->sq_thread_idle)
6723 sq_thread_idle = ctx->sq_thread_idle;
6724 }
6725
6726 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006727}
6728
Jens Axboe6c271ce2019-01-10 11:22:30 -07006729static int io_sq_thread(void *data)
6730{
Jens Axboe69fb2132020-09-14 11:16:23 -06006731 struct io_sq_data *sqd = data;
6732 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006733 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006734 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006735 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006736
Pavel Begunkov696ee882021-04-01 09:55:04 +01006737 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006738 set_task_comm(current, buf);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006739 current->pf_io_worker = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006740
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006741 if (sqd->sq_cpu != -1)
6742 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6743 else
6744 set_cpus_allowed_ptr(current, cpu_online_mask);
6745 current->flags |= PF_NO_SETAFFINITY;
6746
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006747 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07006748 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006749 int ret;
6750 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006751
Jens Axboe82734c52021-03-29 06:52:44 -06006752 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6753 signal_pending(current)) {
6754 bool did_sig = false;
6755
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006756 mutex_unlock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006757 if (signal_pending(current)) {
6758 struct ksignal ksig;
6759
6760 did_sig = get_signal(&ksig);
6761 }
Jens Axboe05962f92021-03-06 13:58:48 -07006762 cond_resched();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006763 mutex_lock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006764 if (did_sig)
6765 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006766 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006767 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006768 timeout = jiffies + sqd->sq_thread_idle;
Pavel Begunkov7d41e852021-03-10 13:13:54 +00006769 continue;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006770 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006771 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06006772 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006773 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006774 const struct cred *creds = NULL;
6775
6776 if (ctx->sq_creds != current_cred())
6777 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006778 ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006779 if (creds)
6780 revert_creds(creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006781 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6782 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006783 }
6784
Xiaoguang Wang08369242020-11-03 14:15:59 +08006785 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006786 io_run_task_work();
6787 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006788 if (sqt_spin)
6789 timeout = jiffies + sqd->sq_thread_idle;
6790 continue;
6791 }
6792
Xiaoguang Wang08369242020-11-03 14:15:59 +08006793 needs_sched = true;
6794 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6795 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6796 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6797 !list_empty_careful(&ctx->iopoll_list)) {
6798 needs_sched = false;
6799 break;
6800 }
6801 if (io_sqring_entries(ctx)) {
6802 needs_sched = false;
6803 break;
6804 }
6805 }
6806
Jens Axboe05962f92021-03-06 13:58:48 -07006807 if (needs_sched && !test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
Jens Axboe69fb2132020-09-14 11:16:23 -06006808 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6809 io_ring_set_wakeup_flag(ctx);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006810
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006811 mutex_unlock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006812 schedule();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006813 mutex_lock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006814 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6815 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006816 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006817
6818 finish_wait(&sqd->wait, &wait);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006819 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006820 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006821 }
6822
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006823 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6824 io_uring_cancel_sqpoll(ctx);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006825 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006826 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006827 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006828 mutex_unlock(&sqd->lock);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006829
6830 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006831 io_run_task_work_head(&sqd->park_task_work);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006832 complete(&sqd->exited);
6833 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006834}
6835
Jens Axboebda52162019-09-24 13:47:15 -06006836struct io_wait_queue {
6837 struct wait_queue_entry wq;
6838 struct io_ring_ctx *ctx;
6839 unsigned to_wait;
6840 unsigned nr_timeouts;
6841};
6842
Pavel Begunkov6c503152021-01-04 20:36:36 +00006843static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006844{
6845 struct io_ring_ctx *ctx = iowq->ctx;
6846
6847 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006848 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006849 * started waiting. For timeouts, we always want to return to userspace,
6850 * regardless of event count.
6851 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006852 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006853 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6854}
6855
6856static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6857 int wake_flags, void *key)
6858{
6859 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6860 wq);
6861
Pavel Begunkov6c503152021-01-04 20:36:36 +00006862 /*
6863 * Cannot safely flush overflowed CQEs from here, ensure we wake up
6864 * the task, and the next invocation will do it.
6865 */
6866 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6867 return autoremove_wake_function(curr, mode, wake_flags, key);
6868 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006869}
6870
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006871static int io_run_task_work_sig(void)
6872{
6873 if (io_run_task_work())
6874 return 1;
6875 if (!signal_pending(current))
6876 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06006877 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06006878 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006879 return -EINTR;
6880}
6881
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006882/* when returns >0, the caller should retry */
6883static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6884 struct io_wait_queue *iowq,
6885 signed long *timeout)
6886{
6887 int ret;
6888
6889 /* make sure we run task_work before checking for signals */
6890 ret = io_run_task_work_sig();
6891 if (ret || io_should_wake(iowq))
6892 return ret;
6893 /* let the caller flush overflows, retry */
6894 if (test_bit(0, &ctx->cq_check_overflow))
6895 return 1;
6896
6897 *timeout = schedule_timeout(*timeout);
6898 return !*timeout ? -ETIME : 1;
6899}
6900
Jens Axboe2b188cc2019-01-07 10:46:33 -07006901/*
6902 * Wait until events become available, if we don't already have some. The
6903 * application must reap them itself, as they reside on the shared cq ring.
6904 */
6905static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08006906 const sigset_t __user *sig, size_t sigsz,
6907 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006908{
Jens Axboebda52162019-09-24 13:47:15 -06006909 struct io_wait_queue iowq = {
6910 .wq = {
6911 .private = current,
6912 .func = io_wake_function,
6913 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6914 },
6915 .ctx = ctx,
6916 .to_wait = min_events,
6917 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006918 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006919 signed long timeout = MAX_SCHEDULE_TIMEOUT;
6920 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006921
Jens Axboeb41e9852020-02-17 09:52:41 -07006922 do {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006923 io_cqring_overflow_flush(ctx, false, NULL, NULL);
6924 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07006925 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06006926 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07006927 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07006928 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006929
6930 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006931#ifdef CONFIG_COMPAT
6932 if (in_compat_syscall())
6933 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006934 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006935 else
6936#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006937 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006938
Jens Axboe2b188cc2019-01-07 10:46:33 -07006939 if (ret)
6940 return ret;
6941 }
6942
Hao Xuc73ebb62020-11-03 10:54:37 +08006943 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006944 struct timespec64 ts;
6945
Hao Xuc73ebb62020-11-03 10:54:37 +08006946 if (get_timespec64(&ts, uts))
6947 return -EFAULT;
6948 timeout = timespec64_to_jiffies(&ts);
6949 }
6950
Jens Axboebda52162019-09-24 13:47:15 -06006951 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006952 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006953 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07006954 /* if we can't even flush overflow, don't wait for more */
6955 if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
6956 ret = -EBUSY;
6957 break;
6958 }
Jens Axboebda52162019-09-24 13:47:15 -06006959 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6960 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006961 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
6962 finish_wait(&ctx->wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07006963 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006964 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06006965
Jens Axboeb7db41c2020-07-04 08:55:50 -06006966 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006967
Hristo Venev75b28af2019-08-26 17:23:46 +00006968 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006969}
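
/*
 * Editor's sketch of driving io_cqring_wait() from userspace (liburing,
 * illustrative; headers and error handling omitted): block for one CQE,
 * then reap it.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	// may enter the wait loop above
 *	printf("res=%d\n", cqe->res);
 *	io_uring_cqe_seen(&ring, cqe);	// advances the CQ head
 *
 * When no CQE is ready, io_uring_wait_cqe() ends up in io_uring_enter(2)
 * with IORING_ENTER_GETEVENTS and min_complete == 1.
 */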
6970
Jens Axboe6b063142019-01-10 22:13:58 -07006971static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6972{
6973#if defined(CONFIG_UNIX)
6974 if (ctx->ring_sock) {
6975 struct sock *sock = ctx->ring_sock->sk;
6976 struct sk_buff *skb;
6977
6978 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6979 kfree_skb(skb);
6980 }
6981#else
6982 int i;
6983
Jens Axboe65e19f52019-10-26 07:20:21 -06006984 for (i = 0; i < ctx->nr_user_files; i++) {
6985 struct file *file;
6986
6987 file = io_file_from_index(ctx, i);
6988 if (file)
6989 fput(file);
6990 }
Jens Axboe6b063142019-01-10 22:13:58 -07006991#endif
6992}
6993
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00006994static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006995{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006996 struct fixed_rsrc_data *data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006997
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006998 data = container_of(ref, struct fixed_rsrc_data, refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006999 complete(&data->done);
7000}
7001
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007002static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007003{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007004 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00007005}
7006
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007007static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07007008{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007009 spin_unlock_bh(&ctx->rsrc_ref_lock);
7010}
7011
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007012static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
7013 struct fixed_rsrc_data *rsrc_data,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007014 struct fixed_rsrc_ref_node *ref_node)
Jens Axboe6b063142019-01-10 22:13:58 -07007015{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007016 io_rsrc_ref_lock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007017 rsrc_data->node = ref_node;
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007018 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007019 io_rsrc_ref_unlock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007020 percpu_ref_get(&rsrc_data->refs);
Jens Axboe6b063142019-01-10 22:13:58 -07007021}
7022
Hao Xu8bad28d2021-02-19 17:19:36 +08007023static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_data *data)
Jens Axboe6b063142019-01-10 22:13:58 -07007024{
Hao Xu8bad28d2021-02-19 17:19:36 +08007025 struct fixed_rsrc_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06007026
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007027 io_rsrc_ref_lock(ctx);
Pavel Begunkov1e5d7702020-11-18 14:56:25 +00007028 ref_node = data->node;
Pavel Begunkove6cb0072021-02-20 18:03:47 +00007029 data->node = NULL;
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007030 io_rsrc_ref_unlock(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007031 if (ref_node)
7032 percpu_ref_kill(&ref_node->refs);
Hao Xu8bad28d2021-02-19 17:19:36 +08007033}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007034
static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
			       struct io_ring_ctx *ctx,
			       void (*rsrc_put)(struct io_ring_ctx *ctx,
						struct io_rsrc_put *prsrc))
{
	struct fixed_rsrc_ref_node *backup_node;
	int ret;

	if (data->quiesce)
		return -ENXIO;

	data->quiesce = true;
	do {
		ret = -ENOMEM;
		backup_node = alloc_fixed_rsrc_ref_node(ctx);
		if (!backup_node)
			break;
		backup_node->rsrc_data = data;
		backup_node->rsrc_put = rsrc_put;

		io_sqe_rsrc_kill_node(ctx, data);
		percpu_ref_kill(&data->refs);
		flush_delayed_work(&ctx->rsrc_put_work);

		ret = wait_for_completion_interruptible(&data->done);
		if (!ret)
			break;

		percpu_ref_resurrect(&data->refs);
		io_sqe_rsrc_set_node(ctx, data, backup_node);
		backup_node = NULL;
		reinit_completion(&data->done);
		mutex_unlock(&ctx->uring_lock);
		ret = io_run_task_work_sig();
		mutex_lock(&ctx->uring_lock);
	} while (ret >= 0);
	data->quiesce = false;

	if (backup_node)
		destroy_fixed_rsrc_ref_node(backup_node);
	return ret;
}

static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
{
	struct fixed_rsrc_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(data);
		return NULL;
	}
	data->ctx = ctx;
	init_completion(&data->done);
	return data;
}

static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
{
	percpu_ref_exit(&data->refs);
	kfree(data->table);
	kfree(data);
}

static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	struct fixed_rsrc_data *data = ctx->file_data;
	unsigned nr_tables, i;
	int ret;

	/*
	 * The percpu_ref_is_dying() check prevents a parallel files
	 * unregister, since we may drop the uring lock later in this
	 * function in order to run task work.
	 */
	if (!data || percpu_ref_is_dying(&data->refs))
		return -ENXIO;
	ret = io_rsrc_ref_quiesce(data, ctx, io_ring_file_put);
	if (ret)
		return ret;

	__io_sqe_files_unregister(ctx);
	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
	for (i = 0; i < nr_tables; i++)
		kfree(data->table[i].files);
	free_fixed_rsrc_data(data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
	return 0;
}

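/*
 * Park/unpark protocol for the SQPOLL thread: io_sq_thread_park() raises
 * IO_SQ_THREAD_SHOULD_PARK, bumps park_pending and takes sqd->lock, so the
 * caller owns the thread until io_sq_thread_unpark() drops the lock again.
 * Neither may be called from the SQPOLL thread itself, hence the
 * WARN_ON_ONCE() checks.
 */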
static void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance, but don't use a conditional clear_bit(): that would
	 * race with other threads incrementing park_pending and setting the
	 * bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

static void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

static void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);

	mutex_lock(&sqd->lock);
	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

static void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

static void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
		if (ctx->sq_creds)
			put_cred(ctx->sq_creds);
	}
}

static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (f.file->f_op != &io_uring_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

#if defined(CONFIG_UNIX)
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i, nr_files;

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;

	nr_files = 0;
	fpl->user = get_uid(current_user());
	for (i = 0; i < nr; i++) {
		struct file *file = io_file_from_index(ctx, i + offset);

		if (!file)
			continue;
		fpl->fp[nr_files] = get_file(file);
		unix_inflight(fpl->user, fpl->fp[nr_files]);
		nr_files++;
	}

	if (nr_files) {
		fpl->max = SCM_MAX_FD;
		fpl->count = nr_files;
		UNIXCB(skb).fp = fpl;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
		skb_queue_head(&sk->sk_receive_queue, skb);

		for (i = 0; i < nr_files; i++)
			fput(fpl->fp[i]);
	} else {
		kfree_skb(skb);
		kfree(fpl);
	}

	return 0;
}

/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		struct file *file = io_file_from_index(ctx, total);

		if (file)
			fput(file);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif

static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
				    unsigned nr_tables, unsigned nr_files)
{
	int i;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_rsrc_table *table = &file_data->table[i];
		unsigned this_files;

		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
		table->files = kcalloc(this_files, sizeof(struct file *),
					GFP_KERNEL);
		if (!table->files)
			break;
		nr_files -= this_files;
	}

	if (i == nr_tables)
		return 0;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_rsrc_table *table = &file_data->table[i];
		kfree(table->files);
	}
	return 1;
}

static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}

static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
{
	struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);
		ref_node->rsrc_put(ctx, prsrc);
		kfree(prsrc);
	}

	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
	percpu_ref_put(&rsrc_data->refs);
}

static void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct fixed_rsrc_ref_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}

static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
	struct fixed_rsrc_ref_node *ref_node;
	struct fixed_rsrc_data *data;
	struct io_ring_ctx *ctx;
	bool first_add = false;
	int delay = HZ;

	ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
	data = ref_node->rsrc_data;
	ctx = data->ctx;

	io_rsrc_ref_lock(ctx);
	ref_node->done = true;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		ref_node = list_first_entry(&ctx->rsrc_ref_list,
					struct fixed_rsrc_ref_node, node);
		/* recycle ref nodes in order */
		if (!ref_node->done)
			break;
		list_del(&ref_node->node);
		first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
	}
	io_rsrc_ref_unlock(ctx);

	if (percpu_ref_is_dying(&data->refs))
		delay = 0;

	if (!delay)
		mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
	else if (first_add)
		queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}

static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx)
{
	struct fixed_rsrc_ref_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return NULL;
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}

static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
				     struct fixed_rsrc_ref_node *ref_node)
{
	ref_node->rsrc_data = ctx->file_data;
	ref_node->rsrc_put = io_ring_file_put;
}

static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}

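/*
 * Entered via io_uring_register(2) with IORING_REGISTER_FILES. A purely
 * illustrative userspace sketch, where a slot of -1 is left sparse:
 *
 *	__s32 fds[2] = { fd0, -1 };
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, 2);
 */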
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables, i;
	struct file *file;
	int fd, ret = -ENOMEM;
	struct fixed_rsrc_ref_node *ref_node;
	struct fixed_rsrc_data *file_data;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	file_data = alloc_fixed_rsrc_data(ctx);
	if (!file_data)
		return -ENOMEM;
	ctx->file_data = file_data;

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
				   GFP_KERNEL);
	if (!file_data->table)
		goto out_free;

	if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
		goto out_free;

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto out_fput;
		}
		/* allow sparse sets */
		if (fd == -1)
			continue;

		file = fget(fd);
		ret = -EBADF;
		if (!file)
			goto out_fput;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			goto out_fput;
		}
		*io_fixed_file_slot(file_data, i) = file;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret) {
		io_sqe_files_unregister(ctx);
		return ret;
	}

	ref_node = alloc_fixed_rsrc_ref_node(ctx);
	if (!ref_node) {
		io_sqe_files_unregister(ctx);
		return -ENOMEM;
	}
	init_fixed_file_ref_node(ctx, ref_node);

	io_sqe_rsrc_set_node(ctx, file_data, ref_node);
	return ret;
out_fput:
	for (i = 0; i < ctx->nr_user_files; i++) {
		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
	for (i = 0; i < nr_tables; i++)
		kfree(file_data->table[i].files);
	ctx->nr_user_files = 0;
out_free:
	free_fixed_rsrc_data(ctx->file_data);
	ctx->file_data = NULL;
	return ret;
}

static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

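/*
 * Removal of a registered resource is deferred: the entry is queued on the
 * current ref node's rsrc_list here, and the actual put happens from
 * io_rsrc_put_work() once that node's percpu refs drop to zero.
 */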
static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
{
	struct io_rsrc_put *prsrc;
	struct fixed_rsrc_ref_node *ref_node = data->node;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &ref_node->rsrc_list);

	return 0;
}

static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
					struct file *file)
{
	return io_queue_rsrc_removal(data, (void *)file);
}

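/*
 * Backend for IORING_REGISTER_FILES_UPDATE: each descriptor in the update
 * either replaces its slot (a valid fd), clears it (-1), or leaves it
 * untouched (IORING_REGISTER_FILES_SKIP). A rough userspace sketch only,
 * with 'up' naming an update struct whose ->fds points at the fds array:
 *
 *	__s32 fds[2] = { new_fd, IORING_REGISTER_FILES_SKIP };
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES_UPDATE, &up, 2);
 */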
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update *up,
				 unsigned nr_args)
{
	struct fixed_rsrc_data *data = ctx->file_data;
	struct fixed_rsrc_ref_node *ref_node;
	struct file *file, **file_slot;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;
	bool needs_switch = false;

	if (check_add_overflow(up->offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	ref_node = alloc_fixed_rsrc_ref_node(ctx);
	if (!ref_node)
		return -ENOMEM;
	init_fixed_file_ref_node(ctx, ref_node);

	fds = u64_to_user_ptr(up->data);
	for (done = 0; done < nr_args; done++) {
		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(ctx->file_data, i);

		if (*file_slot) {
			err = io_queue_file_removal(data, *file_slot);
			if (err)
				break;
			*file_slot = NULL;
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			*file_slot = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err) {
				*file_slot = NULL;
				fput(file);
				break;
			}
		}
	}

	if (needs_switch) {
		percpu_ref_kill(&data->node->refs);
		io_sqe_rsrc_set_node(ctx, data, ref_node);
	} else
		destroy_fixed_rsrc_ref_node(ref_node);

	return done ? done : err;
}

static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_rsrc_update up;

	if (!ctx->file_data)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (up.resv)
		return -EINVAL;

	return __io_sqe_files_update(ctx, &up, nr_args);
}

static struct io_wq_work *io_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	req = io_put_req_find_next(req);
	return req ? &req->work : NULL;
}

static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash)
			return ERR_PTR(-ENOMEM);
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}

	data.hash = hash;
	data.free_work = io_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

static int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	tctx->last = NULL;
	atomic_set(&tctx->in_idle, 0);
	task->io_uring = tctx;
	spin_lock_init(&tctx->task_lock);
	INIT_WQ_LIST(&tctx->task_list);
	tctx->task_state = 0;
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

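/*
 * SQPOLL setup: the caller needs CAP_SYS_ADMIN or CAP_SYS_NICE, and
 * IORING_SETUP_ATTACH_WQ may share an existing io_sq_data (same tgid
 * only); otherwise a fresh io-thread is created via create_io_thread().
 * An illustrative userspace request only:
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
 *	p.sq_thread_cpu = 1;
 *	p.sq_thread_idle = 2000;
 */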
static int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (f.file->f_op != &io_uring_fops) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
			goto err;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		ret = 0;
		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		if (attached && !sqd->thread)
			ret = -ENXIO;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err_sqpoll;
			if (!cpu_online(cpu))
				goto err_sqpoll;

			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err:
	io_sq_thread_finish(ctx);
	return ret;
err_sqpoll:
	complete(&ctx->sq_data->exited);
	goto err;
}

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

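/*
 * Charge nr_pages against RLIMIT_MEMLOCK. A cmpxchg loop is used rather
 * than a plain atomic_add() so that the limit check and the increment are
 * one atomic step; otherwise two racing callers could both slip in under
 * the limit.
 */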
static inline int __io_account_mem(struct user_struct *user,
				   unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY | __GFP_ACCOUNT;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}

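/*
 * Size of the shared ring mapping, roughly laid out as:
 *
 *	[ struct io_rings | cqes[cq_entries] ][ SMP pad ][ __u32 sq_array[sq_entries] ]
 *
 * with *sq_offset receiving the offset of the SQ index array. SIZE_MAX is
 * returned on arithmetic overflow.
 */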
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	if (sq_offset)
		*sq_offset = off;

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	return off;
}

static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			unpin_user_page(imu->bvec[j].bv_page);

		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

/*
 * Not super efficient, but this only happens at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

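/*
 * Pin one userspace buffer long-term (FOLL_LONGTERM) and describe it as an
 * array of bvecs. A worked example, assuming 4K pages: an iovec of length
 * 3 * 4096 that starts in the middle of a page spans four pages, so
 * nr_pages below is 4 and the first and last bvecs are partial.
 */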
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf *imu,
				  struct page **last_hpage)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	unsigned long off, start, end, ubuf;
	size_t size;
	int ret, pret, nr_pages, i;

	ubuf = (unsigned long) iov->iov_base;
	end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	ret = -ENOMEM;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);
	if (!imu->bvec)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			struct vm_area_struct *vma = vmas[i];

			if (vma->vm_file &&
			    !is_file_hugepages(vma->vm_file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		kvfree(imu->bvec);
		goto done;
	}

	ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, pret);
		kvfree(imu->bvec);
		goto done;
	}

	off = ubuf & ~PAGE_MASK;
	size = iov->iov_len;
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		imu->bvec[i].bv_page = pages[i];
		imu->bvec[i].bv_len = vec_len;
		imu->bvec[i].bv_offset = off;
		off = 0;
		size -= vec_len;
	}
	/* store original address for later verification */
	imu->ubuf = ubuf;
	imu->len = iov->iov_len;
	imu->nr_bvecs = nr_pages;
	ret = 0;
done:
	kvfree(pages);
	kvfree(vmas);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
				 GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base || !iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	return 0;
}

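/*
 * Entered via io_uring_register(2) with IORING_REGISTER_BUFFERS; the arg
 * is an array of iovecs. A purely illustrative sketch:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 */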
static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned int nr_args)
{
	int i, ret;
	struct iovec iov;
	struct page *last_hpage = NULL;

	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret)
		return ret;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			break;

		ret = io_buffer_validate(&iov);
		if (ret)
			break;

		ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
		if (ret)
			break;

		ctx->nr_user_bufs++;
	}

	if (ret)
		io_sqe_buffers_unregister(ctx);

	return ret;
}

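/*
 * Register an eventfd to be signalled on CQ completions,
 * IORING_REGISTER_EVENTFD from userspace. Illustrative only:
 *
 *	int efd = eventfd(0, 0);
 *	io_uring_register(ring_fd, IORING_REGISTER_EVENTFD, &efd, 1);
 */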
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}

static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_buffers, index, buf)
		__io_remove_buffers(ctx, buf, index, -1U);
}

static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
{
	struct io_kiocb *req, *nxt;

	list_for_each_entry_safe(req, nxt, list, compl.list) {
		if (tsk && req->task != tsk)
			continue;
		list_del(&req->compl.list);
		kmem_cache_free(req_cachep, req);
	}
}

static void io_req_caches_free(struct io_ring_ctx *ctx)
{
	struct io_submit_state *submit_state = &ctx->submit_state;
	struct io_comp_state *cs = &ctx->submit_state.comp;

	mutex_lock(&ctx->uring_lock);

	if (submit_state->free_reqs) {
		kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
				     submit_state->reqs);
		submit_state->free_reqs = 0;
	}

	spin_lock_irq(&ctx->completion_lock);
	list_splice_init(&cs->locked_free_list, &cs->free_list);
	cs->locked_free_nr = 0;
	spin_unlock_irq(&ctx->completion_lock);

	io_req_cache_free(&cs->free_list, NULL);

	mutex_unlock(&ctx->uring_lock);
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	/*
	 * Some may use context even when all refs and requests have been put,
	 * and they are free to do so while still holding uring_lock or
	 * completion_lock, see __io_req_task_submit(). Wait for them to finish.
	 */
	mutex_lock(&ctx->uring_lock);
	mutex_unlock(&ctx->uring_lock);
	spin_lock_irq(&ctx->completion_lock);
	spin_unlock_irq(&ctx->completion_lock);

	io_sq_thread_finish(ctx);
	io_sqe_buffers_unregister(ctx);

	if (ctx->mm_account) {
		mmdrop(ctx->mm_account);
		ctx->mm_account = NULL;
	}

	mutex_lock(&ctx->uring_lock);
	io_sqe_files_unregister(ctx);
	mutex_unlock(&ctx->uring_lock);
	io_eventfd_unregister(ctx);
	io_destroy_buffers(ctx);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
	io_req_caches_free(ctx);
	if (ctx->hash_map)
		io_wq_put_hash(ctx->hash_map);
	kfree(ctx->cancel_hash);
	kfree(ctx);
}

8471static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8472{
8473 struct io_ring_ctx *ctx = file->private_data;
8474 __poll_t mask = 0;
8475
8476 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008477 /*
8478 * synchronizes with barrier from wq_has_sleeper call in
8479 * io_commit_cqring
8480 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008481 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008482 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008483 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008484
8485 /*
8486 * Don't flush cqring overflow list here, just do a simple check.
8487	 * Otherwise there could possibly be an ABBA deadlock:
8488 * CPU0 CPU1
8489 * ---- ----
8490 * lock(&ctx->uring_lock);
8491 * lock(&ep->mtx);
8492 * lock(&ctx->uring_lock);
8493 * lock(&ep->mtx);
8494 *
8495 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
8496	 * Users may get EPOLLIN while seeing nothing in the cqring; this
8497	 * pushes them to do the flush.
8498 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008499 mask |= EPOLLIN | EPOLLRDNORM;
8500
8501 return mask;
8502}
8503
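/*
 * Editor's note: an illustrative userspace sketch of the EPOLLIN-but-empty
 * case described in io_uring_poll() above; it is not part of the kernel
 * source. The names drain_cq, epfd, ring_fd and the mmap'ed cq_head/cq_tail
 * pointers are hypothetical; acquire/release accesses and error handling
 * are omitted for brevity.
 */
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void drain_cq(int epfd, int ring_fd, unsigned *cq_head, unsigned *cq_tail)
{
	struct epoll_event ev;

	epoll_wait(epfd, &ev, 1, -1);
	if (*cq_head == *cq_tail) {
		/*
		 * EPOLLIN with an empty CQ ring: overflowed CQEs are
		 * pending, and a GETEVENTS enter flushes them back.
		 */
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_GETEVENTS, NULL, 0);
	}
}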
8504static int io_uring_fasync(int fd, struct file *file, int on)
8505{
8506 struct io_ring_ctx *ctx = file->private_data;
8507
8508 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8509}
8510
Yejune Deng0bead8c2020-12-24 11:02:20 +08008511static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008512{
Jens Axboe4379bf82021-02-15 13:40:22 -07008513 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008514
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008515 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008516 if (creds) {
8517 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008518 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008519 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008520
8521 return -EINVAL;
8522}
8523
Pavel Begunkov9b465712021-03-15 14:23:07 +00008524static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008525{
Pavel Begunkov9b465712021-03-15 14:23:07 +00008526 return io_run_task_work_head(&ctx->exit_task_work);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008527}
8528
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008529struct io_tctx_exit {
8530 struct callback_head task_work;
8531 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008532 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008533};
8534
8535static void io_tctx_exit_cb(struct callback_head *cb)
8536{
8537 struct io_uring_task *tctx = current->io_uring;
8538 struct io_tctx_exit *work;
8539
8540 work = container_of(cb, struct io_tctx_exit, task_work);
8541 /*
8542 * When @in_idle, we're in cancellation and it's racy to remove the
8543 * node. It'll be removed by the end of cancellation, just ignore it.
8544 */
8545 if (!atomic_read(&tctx->in_idle))
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008546 io_uring_del_task_file((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008547 complete(&work->completion);
8548}
8549
Jens Axboe85faa7b2020-04-09 18:14:00 -06008550static void io_ring_exit_work(struct work_struct *work)
8551{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008552 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008553 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008554 struct io_tctx_exit exit;
8555 struct io_tctx_node *node;
8556 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008557
Pavel Begunkova185f1d2021-03-23 10:52:38 +00008558 /* prevent SQPOLL from submitting new requests */
8559 if (ctx->sq_data) {
8560 io_sq_thread_park(ctx->sq_data);
8561 list_del_init(&ctx->sqd_list);
8562 io_sqd_update_thread_idle(ctx->sq_data);
8563 io_sq_thread_unpark(ctx->sq_data);
8564 }
8565
Jens Axboe56952e92020-06-17 15:00:04 -06008566 /*
8567 * If we're doing polled IO and end up having requests being
8568 * submitted async (out-of-line), then completions can come in while
8569 * we're waiting for refs to drop. We need to reap these manually,
8570 * as nobody else will be looking for them.
8571 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008572 do {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008573 io_uring_try_cancel_requests(ctx, NULL, NULL);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008574
8575 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008576 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008577
8578 mutex_lock(&ctx->uring_lock);
8579 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008580 WARN_ON_ONCE(time_after(jiffies, timeout));
8581
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008582 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8583 ctx_node);
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008584 exit.ctx = ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008585 init_completion(&exit.completion);
8586 init_task_work(&exit.task_work, io_tctx_exit_cb);
8587 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8588 if (WARN_ON_ONCE(ret))
8589 continue;
8590 wake_up_process(node->task);
8591
8592 mutex_unlock(&ctx->uring_lock);
8593 wait_for_completion(&exit.completion);
8594 cond_resched();
8595 mutex_lock(&ctx->uring_lock);
8596 }
8597 mutex_unlock(&ctx->uring_lock);
8598
Jens Axboe85faa7b2020-04-09 18:14:00 -06008599 io_ring_ctx_free(ctx);
8600}
8601
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008602/* Returns true if we found and killed one or more timeouts */
8603static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
8604 struct files_struct *files)
8605{
8606 struct io_kiocb *req, *tmp;
8607 int canceled = 0;
8608
8609 spin_lock_irq(&ctx->completion_lock);
8610 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
8611 if (io_match_task(req, tsk, files)) {
8612 io_kill_timeout(req, -ECANCELED);
8613 canceled++;
8614 }
8615 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008616 if (canceled != 0)
8617 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008618 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008619 if (canceled != 0)
8620 io_cqring_ev_posted(ctx);
8621 return canceled != 0;
8622}
8623
Jens Axboe2b188cc2019-01-07 10:46:33 -07008624static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8625{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008626 unsigned long index;
8627 struct creds *creds;
8628
Jens Axboe2b188cc2019-01-07 10:46:33 -07008629 mutex_lock(&ctx->uring_lock);
8630 percpu_ref_kill(&ctx->refs);
Pavel Begunkovcda286f2020-12-17 00:24:35 +00008631 /* if force is set, the ring is going away. always drop after that */
8632 ctx->cq_overflow_flushed = 1;
Pavel Begunkov634578f2020-12-06 22:22:44 +00008633 if (ctx->rings)
Pavel Begunkov6c503152021-01-04 20:36:36 +00008634 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008635 xa_for_each(&ctx->personalities, index, creds)
8636 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008637 mutex_unlock(&ctx->uring_lock);
8638
Pavel Begunkov6b819282020-11-06 13:00:25 +00008639 io_kill_timeouts(ctx, NULL, NULL);
8640 io_poll_remove_all(ctx, NULL, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008641
Jens Axboe15dff282019-11-13 09:09:23 -07008642 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008643 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008644
Jens Axboe85faa7b2020-04-09 18:14:00 -06008645 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008646 /*
8647 * Use system_unbound_wq to avoid spawning tons of event kworkers
8648 * if we're exiting a ton of rings at the same time. It just adds
8649	 * noise and overhead, there's no discernible change in runtime
8650 * over using system_wq.
8651 */
8652 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008653}
8654
8655static int io_uring_release(struct inode *inode, struct file *file)
8656{
8657 struct io_ring_ctx *ctx = file->private_data;
8658
8659 file->private_data = NULL;
8660 io_ring_ctx_wait_and_kill(ctx);
8661 return 0;
8662}
8663
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008664struct io_task_cancel {
8665 struct task_struct *task;
8666 struct files_struct *files;
8667};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008668
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008669static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008670{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008671 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008672 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008673 bool ret;
8674
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008675 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008676 unsigned long flags;
8677 struct io_ring_ctx *ctx = req->ctx;
8678
8679 /* protect against races with linked timeouts */
8680 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008681 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008682 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8683 } else {
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008684 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008685 }
8686 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008687}
8688
Pavel Begunkove1915f72021-03-11 23:29:35 +00008689static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008690 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008691 struct files_struct *files)
8692{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008693 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008694 LIST_HEAD(list);
8695
8696 spin_lock_irq(&ctx->completion_lock);
8697 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00008698 if (io_match_task(de->req, task, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008699 list_cut_position(&list, &ctx->defer_list, &de->list);
8700 break;
8701 }
8702 }
8703 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008704 if (list_empty(&list))
8705 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008706
8707 while (!list_empty(&list)) {
8708 de = list_first_entry(&list, struct io_defer_entry, list);
8709 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008710 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008711 kfree(de);
8712 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008713 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008714}
8715
Pavel Begunkov1b007642021-03-06 11:02:17 +00008716static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8717{
8718 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8719
8720 return req->ctx == data;
8721}
8722
8723static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8724{
8725 struct io_tctx_node *node;
8726 enum io_wq_cancel cret;
8727 bool ret = false;
8728
8729 mutex_lock(&ctx->uring_lock);
8730 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8731 struct io_uring_task *tctx = node->task->io_uring;
8732
8733 /*
8734 * io_wq will stay alive while we hold uring_lock, because it's
8735		 * killed after the ctx nodes, which requires taking the lock.
8736 */
8737 if (!tctx || !tctx->io_wq)
8738 continue;
8739 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8740 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8741 }
8742 mutex_unlock(&ctx->uring_lock);
8743
8744 return ret;
8745}
8746
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008747static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8748 struct task_struct *task,
8749 struct files_struct *files)
8750{
8751 struct io_task_cancel cancel = { .task = task, .files = files, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008752 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008753
8754 while (1) {
8755 enum io_wq_cancel cret;
8756 bool ret = false;
8757
Pavel Begunkov1b007642021-03-06 11:02:17 +00008758 if (!task) {
8759 ret |= io_uring_try_cancel_iowq(ctx);
8760 } else if (tctx && tctx->io_wq) {
8761 /*
8762 * Cancels requests of all rings, not only @ctx, but
8763 * it's fine as the task is in exit/exec.
8764 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008765 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008766 &cancel, true);
8767 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8768 }
8769
8770 /* SQPOLL thread does its own polling */
Jens Axboed052d1d2021-03-11 10:49:20 -07008771 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
8772 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008773 while (!list_empty_careful(&ctx->iopoll_list)) {
8774 io_iopoll_try_reap_events(ctx);
8775 ret = true;
8776 }
8777 }
8778
Pavel Begunkove1915f72021-03-11 23:29:35 +00008779 ret |= io_cancel_defer_files(ctx, task, files);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008780 ret |= io_poll_remove_all(ctx, task, files);
8781 ret |= io_kill_timeouts(ctx, task, files);
8782 ret |= io_run_task_work();
Pavel Begunkovba50a032021-02-26 15:47:56 +00008783 ret |= io_run_ctx_fallback(ctx);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008784 io_cqring_overflow_flush(ctx, true, task, files);
8785 if (!ret)
8786 break;
8787 cond_resched();
8788 }
8789}
8790
Pavel Begunkovca70f002021-01-26 15:28:27 +00008791static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8792 struct task_struct *task,
8793 struct files_struct *files)
8794{
8795 struct io_kiocb *req;
8796 int cnt = 0;
8797
8798 spin_lock_irq(&ctx->inflight_lock);
8799 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8800 cnt += io_match_task(req, task, files);
8801 spin_unlock_irq(&ctx->inflight_lock);
8802 return cnt;
8803}
8804
Pavel Begunkovb52fda02020-11-06 13:00:24 +00008805static void io_uring_cancel_files(struct io_ring_ctx *ctx,
Pavel Begunkovdf9923f2020-11-06 13:00:23 +00008806 struct task_struct *task,
Jens Axboefcb323c2019-10-24 12:39:47 -06008807 struct files_struct *files)
8808{
Jens Axboefcb323c2019-10-24 12:39:47 -06008809 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008810 DEFINE_WAIT(wait);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008811 int inflight;
Jens Axboefcb323c2019-10-24 12:39:47 -06008812
Pavel Begunkovca70f002021-01-26 15:28:27 +00008813 inflight = io_uring_count_inflight(ctx, task, files);
8814 if (!inflight)
Jens Axboefcb323c2019-10-24 12:39:47 -06008815 break;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008816
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008817 io_uring_try_cancel_requests(ctx, task, files);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008818
8819 prepare_to_wait(&task->io_uring->wait, &wait,
8820 TASK_UNINTERRUPTIBLE);
8821 if (inflight == io_uring_count_inflight(ctx, task, files))
8822 schedule();
Pavel Begunkovc98de082020-11-15 12:56:32 +00008823 finish_wait(&task->io_uring->wait, &wait);
Jens Axboe0f212202020-09-13 13:09:39 -06008824 }
Jens Axboe0f212202020-09-13 13:09:39 -06008825}
8826
8827/*
Jens Axboe0f212202020-09-13 13:09:39 -06008828 * Note that this task has used io_uring. We use it for cancelation purposes.
8829 */
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008830static int io_uring_add_task_file(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008831{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008832 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008833 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00008834 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008835
8836 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008837 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008838 if (unlikely(ret))
8839 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008840 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008841 }
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008842 if (tctx->last != ctx) {
8843 void *old = xa_load(&tctx->xa, (unsigned long)ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008844
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008845 if (!old) {
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008846 node = kmalloc(sizeof(*node), GFP_KERNEL);
8847 if (!node)
8848 return -ENOMEM;
8849 node->ctx = ctx;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008850 node->task = current;
8851
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008852 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008853 node, GFP_KERNEL));
Pavel Begunkova528b042020-12-21 18:34:04 +00008854 if (ret) {
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008855 kfree(node);
Pavel Begunkova528b042020-12-21 18:34:04 +00008856 return ret;
8857 }
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008858
8859 mutex_lock(&ctx->uring_lock);
8860 list_add(&node->ctx_node, &ctx->tctx_list);
8861 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06008862 }
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008863 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06008864 }
Jens Axboe0f212202020-09-13 13:09:39 -06008865 return 0;
8866}
8867
8868/*
8869 * Remove this io_uring_file -> task mapping.
8870 */
Pavel Begunkov29412672021-03-06 11:02:11 +00008871static void io_uring_del_task_file(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06008872{
8873 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008874 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00008875
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00008876 if (!tctx)
8877 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008878 node = xa_erase(&tctx->xa, index);
8879 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008880 return;
Jens Axboe0f212202020-09-13 13:09:39 -06008881
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008882 WARN_ON_ONCE(current != node->task);
8883 WARN_ON_ONCE(list_empty(&node->ctx_node));
8884
8885 mutex_lock(&node->ctx->uring_lock);
8886 list_del(&node->ctx_node);
8887 mutex_unlock(&node->ctx->uring_lock);
8888
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008889 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008890 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008891 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06008892}
8893
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008894static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008895{
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008896 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008897 unsigned long index;
8898
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008899 xa_for_each(&tctx->xa, index, node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008900 io_uring_del_task_file(index);
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008901 if (tctx->io_wq) {
8902 io_wq_put_and_exit(tctx->io_wq);
8903 tctx->io_wq = NULL;
8904 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008905}
8906
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008907static s64 tctx_inflight(struct io_uring_task *tctx)
8908{
8909 return percpu_counter_sum(&tctx->inflight);
8910}
8911
8912static void io_sqpoll_cancel_cb(struct callback_head *cb)
8913{
8914 struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
8915 struct io_ring_ctx *ctx = work->ctx;
8916 struct io_sq_data *sqd = ctx->sq_data;
8917
8918 if (sqd->thread)
8919 io_uring_cancel_sqpoll(ctx);
8920 complete(&work->completion);
8921}
8922
8923static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
8924{
8925 struct io_sq_data *sqd = ctx->sq_data;
8926 struct io_tctx_exit work = { .ctx = ctx, };
8927 struct task_struct *task;
8928
8929 io_sq_thread_park(sqd);
8930 list_del_init(&ctx->sqd_list);
8931 io_sqd_update_thread_idle(sqd);
8932 task = sqd->thread;
8933 if (task) {
8934 init_completion(&work.completion);
8935 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00008936 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008937 wake_up_process(task);
8938 }
8939 io_sq_thread_unpark(sqd);
8940
8941 if (task)
8942 wait_for_completion(&work.completion);
8943}
8944
Jens Axboe0f212202020-09-13 13:09:39 -06008945void __io_uring_files_cancel(struct files_struct *files)
8946{
8947 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008948 struct io_tctx_node *node;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008949 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06008950
8951 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008952 atomic_inc(&tctx->in_idle);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008953 xa_for_each(&tctx->xa, index, node) {
8954 struct io_ring_ctx *ctx = node->ctx;
8955
8956 if (ctx->sq_data) {
8957 io_sqpoll_cancel_sync(ctx);
8958 continue;
8959 }
8960 io_uring_cancel_files(ctx, current, files);
8961 if (!files)
8962 io_uring_try_cancel_requests(ctx, current, NULL);
8963 }
Jens Axboefdaf0832020-10-30 09:37:30 -06008964 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008965
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008966 if (files)
8967 io_uring_clean_tctx(tctx);
Jens Axboefdaf0832020-10-30 09:37:30 -06008968}
8969
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008970/* should only be called by SQPOLL task */
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008971static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
8972{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008973 struct io_sq_data *sqd = ctx->sq_data;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008974 struct io_uring_task *tctx = current->io_uring;
Jens Axboefdaf0832020-10-30 09:37:30 -06008975 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008976 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008977
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008978 WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
8979
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008980 atomic_inc(&tctx->in_idle);
8981 do {
8982 /* read completions before cancelations */
8983 inflight = tctx_inflight(tctx);
8984 if (!inflight)
8985 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008986 io_uring_try_cancel_requests(ctx, current, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06008987
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008988 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8989 /*
8990 * If we've seen completions, retry without waiting. This
8991 * avoids a race where a completion comes in before we did
8992 * prepare_to_wait().
8993 */
8994 if (inflight == tctx_inflight(tctx))
8995 schedule();
8996 finish_wait(&tctx->wait, &wait);
8997 } while (1);
8998 atomic_dec(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06008999}
9000
Jens Axboe0f212202020-09-13 13:09:39 -06009001/*
9002 * Find any io_uring fd that this task has registered or done IO on, and cancel
9003 * requests.
9004 */
9005void __io_uring_task_cancel(void)
9006{
9007 struct io_uring_task *tctx = current->io_uring;
9008 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009009 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06009010
9011 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06009012 atomic_inc(&tctx->in_idle);
Pavel Begunkov5a978dc2021-03-27 09:59:30 +00009013 __io_uring_files_cancel(NULL);
9014
Jens Axboed8a6df12020-10-15 16:24:45 -06009015 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009016 /* read completions before cancelations */
Jens Axboefdaf0832020-10-30 09:37:30 -06009017 inflight = tctx_inflight(tctx);
Jens Axboed8a6df12020-10-15 16:24:45 -06009018 if (!inflight)
9019 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009020 __io_uring_files_cancel(NULL);
9021
9022 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9023
9024 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009025 * If we've seen completions, retry without waiting. This
9026 * avoids a race where a completion comes in before we did
9027 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009028 */
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009029 if (inflight == tctx_inflight(tctx))
9030 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009031 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009032 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06009033
Jens Axboefdaf0832020-10-30 09:37:30 -06009034 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009035
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009036 io_uring_clean_tctx(tctx);
9037 /* all current's requests should be gone, we can kill tctx */
9038 __io_uring_free(current);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009039}
9040
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009041static void *io_uring_validate_mmap_request(struct file *file,
9042 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009043{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009044 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009045 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009046 struct page *page;
9047 void *ptr;
9048
9049 switch (offset) {
9050 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009051 case IORING_OFF_CQ_RING:
9052 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009053 break;
9054 case IORING_OFF_SQES:
9055 ptr = ctx->sq_sqes;
9056 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009057 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009058 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009059 }
9060
9061 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009062 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009063 return ERR_PTR(-EINVAL);
9064
9065 return ptr;
9066}
9067
9068#ifdef CONFIG_MMU
9069
9070static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9071{
9072 size_t sz = vma->vm_end - vma->vm_start;
9073 unsigned long pfn;
9074 void *ptr;
9075
9076 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9077 if (IS_ERR(ptr))
9078 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009079
9080 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9081 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9082}
9083
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009084#else /* !CONFIG_MMU */
9085
9086static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9087{
9088 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9089}
9090
9091static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9092{
9093 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9094}
9095
9096static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9097 unsigned long addr, unsigned long len,
9098 unsigned long pgoff, unsigned long flags)
9099{
9100 void *ptr;
9101
9102 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9103 if (IS_ERR(ptr))
9104 return PTR_ERR(ptr);
9105
9106 return (unsigned long) ptr;
9107}
9108
9109#endif /* !CONFIG_MMU */
9110
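/*
 * Editor's note: a minimal userspace sketch of the mappings these offsets
 * serve, not part of the kernel source. It assumes `p` was filled in by
 * io_uring_setup(2) and `ring_fd` is the returned fd; map_rings is a
 * hypothetical name and error handling is omitted.
 */
#include <sys/mman.h>
#include <linux/io_uring.h>

static void map_rings(int ring_fd, struct io_uring_params *p)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes +
		       p->cq_entries * sizeof(struct io_uring_cqe);
	void *sq_ptr, *cq_ptr, *sqes;

	if (p->features & IORING_FEAT_SINGLE_MMAP)
		sq_sz = cq_sz = (cq_sz > sq_sz) ? cq_sz : sq_sz;

	sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	if (p->features & IORING_FEAT_SINGLE_MMAP)
		cq_ptr = sq_ptr;	/* SQ and CQ rings share one mapping */
	else
		cq_ptr = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_POPULATE, ring_fd,
			      IORING_OFF_CQ_RING);
	sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		    ring_fd, IORING_OFF_SQES);
	/* real code would stash sq_ptr/cq_ptr/sqes and check for MAP_FAILED */
}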
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009111static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009112{
9113 DEFINE_WAIT(wait);
9114
9115 do {
9116 if (!io_sqring_full(ctx))
9117 break;
Jens Axboe90554202020-09-03 12:12:41 -06009118 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9119
9120 if (!io_sqring_full(ctx))
9121 break;
Jens Axboe90554202020-09-03 12:12:41 -06009122 schedule();
9123 } while (!signal_pending(current));
9124
9125 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009126 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009127}
9128
Hao Xuc73ebb62020-11-03 10:54:37 +08009129static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9130 struct __kernel_timespec __user **ts,
9131 const sigset_t __user **sig)
9132{
9133 struct io_uring_getevents_arg arg;
9134
9135 /*
9136 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9137 * is just a pointer to the sigset_t.
9138 */
9139 if (!(flags & IORING_ENTER_EXT_ARG)) {
9140 *sig = (const sigset_t __user *) argp;
9141 *ts = NULL;
9142 return 0;
9143 }
9144
9145 /*
9146 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9147 * timespec and sigset_t pointers if good.
9148 */
9149 if (*argsz != sizeof(arg))
9150 return -EINVAL;
9151 if (copy_from_user(&arg, argp, sizeof(arg)))
9152 return -EFAULT;
9153 *sig = u64_to_user_ptr(arg.sigmask);
9154 *argsz = arg.sigmask_sz;
9155 *ts = u64_to_user_ptr(arg.ts);
9156 return 0;
9157}
9158
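/*
 * Editor's note: an illustrative userspace call matching the layout parsed
 * above, not part of the kernel source. wait_cqe_timeout is a hypothetical
 * helper; `ring_fd` is assumed to be an io_uring fd and error handling is
 * omitted.
 */
#include <signal.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static int wait_cqe_timeout(int ring_fd, const sigset_t *mask)
{
	struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_getevents_arg arg = {
		.sigmask	= (uint64_t)(uintptr_t)mask,
		.sigmask_sz	= _NSIG / 8,
		.ts		= (uint64_t)(uintptr_t)&ts,
	};

	/* argsz must equal sizeof(arg) once IORING_ENTER_EXT_ARG is set */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}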
Jens Axboe2b188cc2019-01-07 10:46:33 -07009159SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009160 u32, min_complete, u32, flags, const void __user *, argp,
9161 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009162{
9163 struct io_ring_ctx *ctx;
9164 long ret = -EBADF;
9165 int submitted = 0;
9166 struct fd f;
9167
Jens Axboe4c6e2772020-07-01 11:29:10 -06009168 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009169
Jens Axboe90554202020-09-03 12:12:41 -06009170 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
Hao Xuc73ebb62020-11-03 10:54:37 +08009171 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009172 return -EINVAL;
9173
9174 f = fdget(fd);
9175 if (!f.file)
9176 return -EBADF;
9177
9178 ret = -EOPNOTSUPP;
9179 if (f.file->f_op != &io_uring_fops)
9180 goto out_fput;
9181
9182 ret = -ENXIO;
9183 ctx = f.file->private_data;
9184 if (!percpu_ref_tryget(&ctx->refs))
9185 goto out_fput;
9186
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009187 ret = -EBADFD;
9188 if (ctx->flags & IORING_SETUP_R_DISABLED)
9189 goto out;
9190
Jens Axboe6c271ce2019-01-10 11:22:30 -07009191 /*
9192 * For SQ polling, the thread will do all submissions and completions.
9193 * Just return the requested submit count, and wake the thread if
9194 * we were asked to.
9195 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009196 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009197 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00009198 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009199
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009200 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009201 if (unlikely(ctx->sq_data->thread == NULL)) {
9202 goto out;
9203 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009204 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009205 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009206 if (flags & IORING_ENTER_SQ_WAIT) {
9207 ret = io_sqpoll_wait_sq(ctx);
9208 if (ret)
9209 goto out;
9210 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009211 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009212 } else if (to_submit) {
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009213 ret = io_uring_add_task_file(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009214 if (unlikely(ret))
9215 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009216 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009217 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009218 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009219
9220 if (submitted != to_submit)
9221 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009222 }
9223 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009224 const sigset_t __user *sig;
9225 struct __kernel_timespec __user *ts;
9226
9227 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9228 if (unlikely(ret))
9229 goto out;
9230
Jens Axboe2b188cc2019-01-07 10:46:33 -07009231 min_complete = min(min_complete, ctx->cq_entries);
9232
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009233 /*
9234 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9235		 * space applications don't need to poll for io completion events
9236		 * themselves; they can rely on io_sq_thread to do the polling
9237 * work, which can reduce cpu usage and uring_lock contention.
9238 */
9239 if (ctx->flags & IORING_SETUP_IOPOLL &&
9240 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009241 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009242 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009243 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009244 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009245 }
9246
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009247out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009248 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009249out_fput:
9250 fdput(f);
9251 return submitted ? submitted : ret;
9252}
9253
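/*
 * Editor's note: a hedged userspace sketch of the SQPOLL fast path handled
 * above, not part of the kernel source: the syscall is only made to wake an
 * idle thread or to flush an overflowed CQ ring. sqpoll_submit is a
 * hypothetical helper and `sq_flags` is assumed to point at the mmap'ed SQ
 * flags word (sq_off.flags); barriers and error handling are omitted.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void sqpoll_submit(int ring_fd, unsigned to_submit,
			  const unsigned *sq_flags)
{
	unsigned flags = 0;

	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
		flags |= IORING_ENTER_SQ_WAKEUP;	/* thread went idle */
	if (*sq_flags & IORING_SQ_CQ_OVERFLOW)
		flags |= IORING_ENTER_GETEVENTS;	/* request a flush */
	if (flags)
		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0, flags,
			NULL, 0);
	/* otherwise no syscall at all: the SQPOLL thread reaps the SQ tail */
}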
Tobias Klauserbebdb652020-02-26 18:38:32 +01009254#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009255static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9256 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009257{
Jens Axboe87ce9552020-01-30 08:25:34 -07009258 struct user_namespace *uns = seq_user_ns(m);
9259 struct group_info *gi;
9260 kernel_cap_t cap;
9261 unsigned __capi;
9262 int g;
9263
9264 seq_printf(m, "%5d\n", id);
9265 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9266 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9267 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9268 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9269 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9270 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9271 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9272 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9273 seq_puts(m, "\n\tGroups:\t");
9274 gi = cred->group_info;
9275 for (g = 0; g < gi->ngroups; g++) {
9276 seq_put_decimal_ull(m, g ? " " : "",
9277 from_kgid_munged(uns, gi->gid[g]));
9278 }
9279 seq_puts(m, "\n\tCapEff:\t");
9280 cap = cred->cap_effective;
9281 CAP_FOR_EACH_U32(__capi)
9282 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9283 seq_putc(m, '\n');
9284 return 0;
9285}
9286
9287static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9288{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009289 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009290 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009291 int i;
9292
Jens Axboefad8e0d2020-09-28 08:57:48 -06009293 /*
9294 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9295	 * since the fdinfo case grabs it in the opposite direction of normal use
9296 * cases. If we fail to get the lock, we just don't iterate any
9297 * structures that could be going away outside the io_uring mutex.
9298 */
9299 has_lock = mutex_trylock(&ctx->uring_lock);
9300
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009301 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009302 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009303 if (!sq->thread)
9304 sq = NULL;
9305 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009306
9307 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9308 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009309 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009310 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Pavel Begunkovea64ec022021-02-04 13:52:07 +00009311 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009312
Jens Axboe87ce9552020-01-30 08:25:34 -07009313 if (f)
9314 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9315 else
9316 seq_printf(m, "%5u: <none>\n", i);
9317 }
9318 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009319 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009320 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9321
9322 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9323 (unsigned int) buf->len);
9324 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009325 if (has_lock && !xa_empty(&ctx->personalities)) {
9326 unsigned long index;
9327 const struct cred *cred;
9328
Jens Axboe87ce9552020-01-30 08:25:34 -07009329 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009330 xa_for_each(&ctx->personalities, index, cred)
9331 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009332 }
Jens Axboed7718a92020-02-14 22:23:12 -07009333 seq_printf(m, "PollList:\n");
9334 spin_lock_irq(&ctx->completion_lock);
9335 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9336 struct hlist_head *list = &ctx->cancel_hash[i];
9337 struct io_kiocb *req;
9338
9339 hlist_for_each_entry(req, list, hash_node)
9340 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9341 req->task->task_works != NULL);
9342 }
9343 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009344 if (has_lock)
9345 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009346}
9347
9348static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9349{
9350 struct io_ring_ctx *ctx = f->private_data;
9351
9352 if (percpu_ref_tryget(&ctx->refs)) {
9353 __io_uring_show_fdinfo(ctx, m);
9354 percpu_ref_put(&ctx->refs);
9355 }
9356}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009357#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009358
Jens Axboe2b188cc2019-01-07 10:46:33 -07009359static const struct file_operations io_uring_fops = {
9360 .release = io_uring_release,
9361 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009362#ifndef CONFIG_MMU
9363 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9364 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9365#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009366 .poll = io_uring_poll,
9367 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009368#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009369 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009370#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009371};
9372
9373static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9374 struct io_uring_params *p)
9375{
Hristo Venev75b28af2019-08-26 17:23:46 +00009376 struct io_rings *rings;
9377 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009378
Jens Axboebd740482020-08-05 12:58:23 -06009379 /* make sure these are sane, as we already accounted them */
9380 ctx->sq_entries = p->sq_entries;
9381 ctx->cq_entries = p->cq_entries;
9382
Hristo Venev75b28af2019-08-26 17:23:46 +00009383 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9384 if (size == SIZE_MAX)
9385 return -EOVERFLOW;
9386
9387 rings = io_mem_alloc(size);
9388 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009389 return -ENOMEM;
9390
Hristo Venev75b28af2019-08-26 17:23:46 +00009391 ctx->rings = rings;
9392 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9393 rings->sq_ring_mask = p->sq_entries - 1;
9394 rings->cq_ring_mask = p->cq_entries - 1;
9395 rings->sq_ring_entries = p->sq_entries;
9396 rings->cq_ring_entries = p->cq_entries;
9397 ctx->sq_mask = rings->sq_ring_mask;
9398 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009399
9400 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009401 if (size == SIZE_MAX) {
9402 io_mem_free(ctx->rings);
9403 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009404 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009405 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009406
9407 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009408 if (!ctx->sq_sqes) {
9409 io_mem_free(ctx->rings);
9410 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009411 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009412 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009413
Jens Axboe2b188cc2019-01-07 10:46:33 -07009414 return 0;
9415}
9416
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009417static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9418{
9419 int ret, fd;
9420
9421 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9422 if (fd < 0)
9423 return fd;
9424
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009425 ret = io_uring_add_task_file(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009426 if (ret) {
9427 put_unused_fd(fd);
9428 return ret;
9429 }
9430 fd_install(fd, file);
9431 return fd;
9432}
9433
Jens Axboe2b188cc2019-01-07 10:46:33 -07009434/*
9435 * Allocate an anonymous fd, this is what constitutes the application
9436 * visible backing of an io_uring instance. The application mmaps this
9437 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9438 * we have to tie this fd to a socket for file garbage collection purposes.
9439 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009440static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009441{
9442 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009443#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009444 int ret;
9445
Jens Axboe2b188cc2019-01-07 10:46:33 -07009446 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9447 &ctx->ring_sock);
9448 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009449 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009450#endif
9451
Jens Axboe2b188cc2019-01-07 10:46:33 -07009452 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9453 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009454#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009455 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009456 sock_release(ctx->ring_sock);
9457 ctx->ring_sock = NULL;
9458 } else {
9459 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009460 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009461#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009462 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009463}
9464
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009465static int io_uring_create(unsigned entries, struct io_uring_params *p,
9466 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009467{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009468 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009469 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009470 int ret;
9471
Jens Axboe8110c1a2019-12-28 15:39:54 -07009472 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009473 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009474 if (entries > IORING_MAX_ENTRIES) {
9475 if (!(p->flags & IORING_SETUP_CLAMP))
9476 return -EINVAL;
9477 entries = IORING_MAX_ENTRIES;
9478 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009479
9480 /*
9481 * Use twice as many entries for the CQ ring. It's possible for the
9482 * application to drive a higher depth than the size of the SQ ring,
9483 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009484 * some flexibility in overcommitting a bit. If the application has
9485 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9486 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009487 */
9488 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009489 if (p->flags & IORING_SETUP_CQSIZE) {
9490 /*
9491 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9492 * to a power-of-two, if it isn't already. We do NOT impose
9493 * any cq vs sq ring sizing.
9494 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009495 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009496 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009497 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9498 if (!(p->flags & IORING_SETUP_CLAMP))
9499 return -EINVAL;
9500 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9501 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009502 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9503 if (p->cq_entries < p->sq_entries)
9504 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009505 } else {
9506 p->cq_entries = 2 * p->sq_entries;
9507 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009508
Jens Axboe2b188cc2019-01-07 10:46:33 -07009509 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009510 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009511 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009512 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009513 if (!capable(CAP_IPC_LOCK))
9514 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009515
9516 /*
9517 * This is just grabbed for accounting purposes. When a process exits,
9518 * the mm is exited and dropped before the files, hence we need to hang
9519 * on to this mm purely for the purposes of being able to unaccount
9520 * memory (locked/pinned vm). It's not used for anything else.
9521 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009522 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009523 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009524
Jens Axboe2b188cc2019-01-07 10:46:33 -07009525 ret = io_allocate_scq_urings(ctx, p);
9526 if (ret)
9527 goto err;
9528
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009529 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009530 if (ret)
9531 goto err;
9532
Jens Axboe2b188cc2019-01-07 10:46:33 -07009533 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009534 p->sq_off.head = offsetof(struct io_rings, sq.head);
9535 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9536 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9537 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9538 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9539 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9540 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009541
9542 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009543 p->cq_off.head = offsetof(struct io_rings, cq.head);
9544 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9545 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9546 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9547 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9548 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009549 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009550
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009551 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9552 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009553 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009554 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Jens Axboe1c0aa1f2021-02-20 11:55:28 -07009555 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009556
9557 if (copy_to_user(params, p, sizeof(*p))) {
9558 ret = -EFAULT;
9559 goto err;
9560 }
Jens Axboed1719f72020-07-30 13:43:53 -06009561
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009562 file = io_uring_get_file(ctx);
9563 if (IS_ERR(file)) {
9564 ret = PTR_ERR(file);
9565 goto err;
9566 }
9567
Jens Axboed1719f72020-07-30 13:43:53 -06009568 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009569 * Install ring fd as the very last thing, so we don't risk someone
9570 * having closed it before we finish setup
9571 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009572 ret = io_uring_install_fd(ctx, file);
9573 if (ret < 0) {
9574 /* fput will clean it up */
9575 fput(file);
9576 return ret;
9577 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009578
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009579 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009580 return ret;
9581err:
9582 io_ring_ctx_wait_and_kill(ctx);
9583 return ret;
9584}
9585
9586/*
9587 * Sets up an io_uring context, and returns the fd. Applications ask for a
9588 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9589 * params structure passed in.
9590 */
9591static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9592{
9593 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009594 int i;
9595
9596 if (copy_from_user(&p, params, sizeof(p)))
9597 return -EFAULT;
9598 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9599 if (p.resv[i])
9600 return -EINVAL;
9601 }
9602
Jens Axboe6c271ce2019-01-10 11:22:30 -07009603 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009604 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009605 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9606 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009607 return -EINVAL;
9608
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009609 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009610}
9611
9612SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9613 struct io_uring_params __user *, params)
9614{
9615 return io_uring_setup(entries, params);
9616}
9617
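/*
 * Editor's note: an illustrative setup call exercising the sizing rules in
 * io_uring_create() above, not part of the kernel source. setup_ring is a
 * hypothetical helper and error handling is omitted.
 */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int setup_ring(unsigned entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_CQSIZE;
	p.cq_entries = 4096;	/* rounded up, must end up >= sq_entries */

	/* e.g. entries == 100 comes back as p.sq_entries == 128 */
	return syscall(__NR_io_uring_setup, entries, &p);
}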
Jens Axboe66f4af92020-01-16 15:36:52 -07009618static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9619{
9620 struct io_uring_probe *p;
9621 size_t size;
9622 int i, ret;
9623
9624 size = struct_size(p, ops, nr_args);
9625 if (size == SIZE_MAX)
9626 return -EOVERFLOW;
9627 p = kzalloc(size, GFP_KERNEL);
9628 if (!p)
9629 return -ENOMEM;
9630
9631 ret = -EFAULT;
9632 if (copy_from_user(p, arg, size))
9633 goto out;
9634 ret = -EINVAL;
9635 if (memchr_inv(p, 0, size))
9636 goto out;
9637
9638 p->last_op = IORING_OP_LAST - 1;
9639 if (nr_args > IORING_OP_LAST)
9640 nr_args = IORING_OP_LAST;
9641
9642 for (i = 0; i < nr_args; i++) {
9643 p->ops[i].op = i;
9644 if (!io_op_defs[i].not_supported)
9645 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9646 }
9647 p->ops_len = i;
9648
9649 ret = 0;
9650 if (copy_to_user(arg, p, size))
9651 ret = -EFAULT;
9652out:
9653 kfree(p);
9654 return ret;
9655}
9656
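/*
 * Editor's note: a sketch of the matching userspace probe, sized for up to
 * 256 opcodes the way liburing does it; not part of the kernel source.
 * op_supported is a hypothetical helper and `ring_fd` is assumed.
 */
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int op_supported(int ring_fd, int op)
{
	struct io_uring_probe *p;
	int ret;

	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return 0;
	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_PROBE, p, 256);
	/* ops[i].op == i above, so indexing by opcode is valid */
	ret = !ret && op <= p->last_op &&
	      (p->ops[op].flags & IO_URING_OP_SUPPORTED);
	free(p);
	return ret;
}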
Jens Axboe071698e2020-01-28 10:04:42 -07009657static int io_register_personality(struct io_ring_ctx *ctx)
9658{
Jens Axboe4379bf82021-02-15 13:40:22 -07009659 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009660 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009661 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009662
Jens Axboe4379bf82021-02-15 13:40:22 -07009663 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009664
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009665 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9666 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9667 if (!ret)
9668 return id;
9669 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009670 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009671}
9672
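/*
 * Editor's note: an illustrative userspace pairing for the personality
 * register/unregister calls, not part of the kernel source; the returned
 * id is what an SQE's personality field refers to. personality_roundtrip
 * is a hypothetical helper and `ring_fd` is assumed.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void personality_roundtrip(int ring_fd)
{
	int id;

	/* snapshots the caller's current creds, as in the xa_alloc above */
	id = syscall(__NR_io_uring_register, ring_fd,
		     IORING_REGISTER_PERSONALITY, NULL, 0);
	if (id >= 0) {
		/* sqe->personality = id; ... submit requests ... */
		syscall(__NR_io_uring_register, ring_fd,
			IORING_UNREGISTER_PERSONALITY, NULL, id);
	}
}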
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009673static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9674 unsigned int nr_args)
9675{
9676 struct io_uring_restriction *res;
9677 size_t size;
9678 int i, ret;
9679
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009680 /* Restrictions allowed only if rings started disabled */
9681 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9682 return -EBADFD;
9683
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009684 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009685 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009686 return -EBUSY;
9687
9688 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9689 return -EINVAL;
9690
9691 size = array_size(nr_args, sizeof(*res));
9692 if (size == SIZE_MAX)
9693 return -EOVERFLOW;
9694
9695 res = memdup_user(arg, size);
9696 if (IS_ERR(res))
9697 return PTR_ERR(res);
9698
9699 ret = 0;
9700
9701 for (i = 0; i < nr_args; i++) {
9702 switch (res[i].opcode) {
9703 case IORING_RESTRICTION_REGISTER_OP:
9704 if (res[i].register_op >= IORING_REGISTER_LAST) {
9705 ret = -EINVAL;
9706 goto out;
9707 }
9708
9709 __set_bit(res[i].register_op,
9710 ctx->restrictions.register_op);
9711 break;
9712 case IORING_RESTRICTION_SQE_OP:
9713 if (res[i].sqe_op >= IORING_OP_LAST) {
9714 ret = -EINVAL;
9715 goto out;
9716 }
9717
9718 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9719 break;
9720 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9721 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9722 break;
9723 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9724 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9725 break;
9726 default:
9727 ret = -EINVAL;
9728 goto out;
9729 }
9730 }
9731
9732out:
9733 /* Reset all restrictions if an error happened */
9734 if (ret != 0)
9735 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9736 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009737 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009738
9739 kfree(res);
9740 return ret;
9741}
9742
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009743static int io_register_enable_rings(struct io_ring_ctx *ctx)
9744{
9745 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9746 return -EBADFD;
9747
9748 if (ctx->restrictions.registered)
9749 ctx->restricted = 1;
9750
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009751 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9752 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9753 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009754 return 0;
9755}
9756
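/*
 * Editor's note: a hedged userspace sketch tying the two registrations
 * above together, not part of the kernel source: a ring created with
 * IORING_SETUP_R_DISABLED gets its restrictions applied before it is
 * enabled. lock_down_ring is a hypothetical helper; error handling is
 * abbreviated.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int lock_down_ring(int ring_fd)
{
	struct io_uring_restriction res[2] = {
		{ .opcode	= IORING_RESTRICTION_SQE_OP,
		  .sqe_op	= IORING_OP_READV },
		{ .opcode	= IORING_RESTRICTION_REGISTER_OP,
		  .register_op	= IORING_REGISTER_BUFFERS },
	};

	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_RESTRICTIONS, res, 2))
		return -1;
	/* from here on, only READV SQEs and buffer registration pass */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}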
Jens Axboe071698e2020-01-28 10:04:42 -07009757static bool io_register_op_must_quiesce(int op)
9758{
9759 switch (op) {
9760 case IORING_UNREGISTER_FILES:
9761 case IORING_REGISTER_FILES_UPDATE:
9762 case IORING_REGISTER_PROBE:
9763 case IORING_REGISTER_PERSONALITY:
9764 case IORING_UNREGISTER_PERSONALITY:
9765 return false;
9766 default:
9767 return true;
9768 }
9769}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop the uring mutex before waiting for references to exit.
		 * If another thread is currently inside io_uring_enter() it
		 * might need to grab the uring_lock to make progress. If we
		 * hold it here across the drain wait, then we can deadlock.
		 * It's safe to drop the mutex here, since no new references
		 * will come in after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		do {
			ret = wait_for_completion_interruptible(&ctx->ref_comp);
			if (!ret)
				break;
			ret = io_run_task_work_sig();
			if (ret < 0)
				break;
		} while (1);

		mutex_lock(&ctx->uring_lock);

		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			goto out_quiesce;
		}
	}

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST) {
			ret = -EINVAL;
			goto out;
		}

		if (!test_bit(opcode, ctx->restrictions.register_op)) {
			ret = -EACCES;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out_quiesce:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}
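
/*
 * Userspace sketch (illustrative, not kernel code): asking the running
 * kernel which opcodes it supports via IORING_REGISTER_PROBE. The probe
 * buffer must be zeroed on input; ring_fd is an assumption, and liburing
 * wraps this as io_uring_get_probe().
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  256 * sizeof(struct io_uring_probe_op));
 *	ret = syscall(__NR_io_uring_register, ring_fd,
 *		      IORING_REGISTER_PROBE, probe, 256);
 *	if (!ret && (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED))
 *		// readv is available on this kernel
 */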

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
				ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
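
/*
 * Userspace sketch (illustrative, not kernel code): glibc provides no
 * wrapper for io_uring_register(2), so it is invoked through syscall(2).
 * Here a single fixed buffer is registered for later use with
 * IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED; buf and len are assumptions.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	ret = syscall(__NR_io_uring_register, ring_fd,
 *		      IORING_REGISTER_BUFFERS, &iov, 1);
 */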

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)

	/* Pin down the UAPI layout of the SQE; any drift breaks userspace */
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
}
__initcall(io_uring_init);