// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
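
/*
 * For illustration only: a minimal userspace sketch of the CQ reaping
 * pattern the rules above imply. The names (cq_head, cq_tail, cq_mask,
 * cqes) are hypothetical pointers into the mmap'ed CQ ring, not kernel
 * symbols:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);	// pairs with kernel tail store
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		process(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);	// orders entry loads before head store
 */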
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
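
/*
 * A fixed file index thus splits into a table index (upper bits) and a
 * slot within that table (lower bits). A hedged sketch of the implied
 * lookup, using the fixed_rsrc_table layout defined below:
 *
 *	struct fixed_rsrc_table *table =
 *		&file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	struct file *file = table->files[i & IORING_FILE_TABLE_MASK];
 */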
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
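
/*
 * For reference, a hedged userspace sketch of mapping this structure;
 * p is the io_uring_params filled in by io_uring_setup() and ring_fd
 * the returned fd (both assumed set up by the caller):
 *
 *	size_t sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq_ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	__u32 *sq_tail = sq_ring + p.sq_off.tail;
 */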

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct io_ring_ctx;

struct io_rsrc_put {
	struct list_head list;
	union {
		void *rsrc;
		struct file *file;
	};
};

struct fixed_rsrc_table {
	struct file		**files;
};

struct fixed_rsrc_ref_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct fixed_rsrc_data		*rsrc_data;
	void				(*rsrc_put)(struct io_ring_ctx *ctx,
						    struct io_rsrc_put *prsrc);
	struct llist_node		llist;
	bool				done;
};

struct fixed_rsrc_data {
	struct fixed_rsrc_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
	struct callback_head	*park_task_work;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	unsigned int		locked_free_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct io_submit_state		submit_state;

	struct io_rings	*rings;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_rsrc_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ref_comp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct xarray		io_buffers;

	struct xarray		personalities;
	u32			pers_next;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct io_restriction		restrictions;

	/* exit task_work */
	struct callback_head		*exit_task_work;

	struct wait_queue_head		hash_wait;

	/* Keep this last, we don't need it for the fast path */
	struct work_struct		exit_work;
	struct list_head		tctx_list;
};
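
/*
 * To illustrate the sq_array indirection documented above, a hedged
 * userspace submission sketch (sqes, sq_array, sq_tail, sq_mask are
 * hypothetical pointers into the two mmap'ed regions):
 *
 *	unsigned tail = *sq_tail;
 *	unsigned idx = tail & *sq_mask;
 *
 *	fill_sqe(&sqes[idx]);			// write the SQE itself
 *	sq_array[idx] = idx;			// publish its ring index
 *	smp_store_release(sq_tail, tail + 1);	// order SQE stores before tail
 */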

struct io_uring_task {
	/* submission side */
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		in_idle;
	bool			sqpoll;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	int				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

struct io_task_work {
	struct io_wq_work_node	node;
	task_work_func_t	func;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'file' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;
	union {
		struct io_task_work	io_task_work;
		struct callback_head	task_work;
	};
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};
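
/*
 * Illustrating the NOTE above (a sketch, not part of the original
 * source): every union member starts with the struct file pointer, so
 * all of these name the same storage:
 *
 *	BUILD_BUG_ON(offsetof(struct io_kiocb, file) !=
 *		     offsetof(struct io_kiocb, rw.kiocb.ki_filp));
 *	BUILD_BUG_ON(offsetof(struct io_kiocb, file) !=
 *		     offsetof(struct io_kiocb, poll.file));
 */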

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};
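
/*
 * A hedged sketch of how the table above gates request setup; the
 * authoritative checks live in the submission and async-prep paths
 * further down in this file:
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file && !req->file)
 *		return -EBADF;
 *	if (def->needs_async_data)
 *		req->async_data = kmalloc(def->async_size, GFP_KERNEL);
 */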
1005
Pavel Begunkov7a612352021-03-09 00:37:59 +00001006static bool io_disarm_next(struct io_kiocb *req);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00001007static void io_uring_del_task_file(unsigned long index);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00001008static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1009 struct task_struct *task,
1010 struct files_struct *files);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07001011static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001012static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00001013static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00001014 struct io_ring_ctx *ctx);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00001015static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00001016
Pavel Begunkov23faba32021-02-11 18:28:22 +00001017static bool io_rw_reissue(struct io_kiocb *req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07001018static void io_cqring_fill_event(struct io_kiocb *req, long res);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001019static void io_put_req(struct io_kiocb *req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001020static void io_put_req_deferred(struct io_kiocb *req, int nr);
Jens Axboec40f6372020-06-25 15:39:59 -06001021static void io_double_put_req(struct io_kiocb *req);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001022static void io_dismantle_req(struct io_kiocb *req);
1023static void io_put_task(struct task_struct *task, int nr);
1024static void io_queue_next(struct io_kiocb *req);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001025static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
Jens Axboe7271ef32020-08-10 09:55:22 -06001026static void __io_queue_linked_timeout(struct io_kiocb *req);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001027static void io_queue_linked_timeout(struct io_kiocb *req);
Jens Axboe05f3fb32019-12-09 11:22:50 -07001028static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001029 struct io_uring_rsrc_update *ip,
Jens Axboe05f3fb32019-12-09 11:22:50 -07001030 unsigned nr_args);
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001031static void __io_clean_op(struct io_kiocb *req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01001032static struct file *io_file_get(struct io_submit_state *state,
1033 struct io_kiocb *req, int fd, bool fixed);
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00001034static void __io_queue_sqe(struct io_kiocb *req);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001035static void io_rsrc_put_work(struct work_struct *work);
Jens Axboede0617e2019-04-06 21:51:27 -06001036
Pavel Begunkov847595d2021-02-04 13:52:06 +00001037static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
1038 struct iov_iter *iter, bool needs_lock);
Jens Axboeff6165b2020-08-13 09:47:43 -06001039static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
1040 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06001041 struct iov_iter *iter, bool force);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001042static void io_req_task_queue(struct io_kiocb *req);
Jens Axboe65453d12021-02-10 00:03:21 +00001043static void io_submit_flush_completions(struct io_comp_state *cs,
1044 struct io_ring_ctx *ctx);
Jens Axboe9a56a232019-01-09 09:06:50 -07001045
Jens Axboe2b188cc2019-01-07 10:46:33 -07001046static struct kmem_cache *req_cachep;
1047
Jens Axboe09186822020-10-13 15:01:40 -06001048static const struct file_operations io_uring_fops;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001049
1050struct sock *io_uring_get_socket(struct file *file)
1051{
1052#if defined(CONFIG_UNIX)
1053 if (file->f_op == &io_uring_fops) {
1054 struct io_ring_ctx *ctx = file->private_data;
1055
1056 return ctx->ring_sock->sk;
1057 }
1058#endif
1059 return NULL;
1060}
1061EXPORT_SYMBOL(io_uring_get_socket);
1062
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001063#define io_for_each_link(pos, head) \
1064 for (pos = (head); pos; pos = pos->link)
1065
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001066static inline void io_clean_op(struct io_kiocb *req)
1067{
Pavel Begunkov9d5c8192021-01-24 15:08:14 +00001068 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001069 __io_clean_op(req);
1070}
1071
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001072static inline void io_set_resource_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001073{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001074 struct io_ring_ctx *ctx = req->ctx;
1075
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001076 if (!req->fixed_rsrc_refs) {
1077 req->fixed_rsrc_refs = &ctx->file_data->node->refs;
1078 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001079 }
1080}
1081
Pavel Begunkov08d23632020-11-06 13:00:22 +00001082static bool io_match_task(struct io_kiocb *head,
1083 struct task_struct *task,
1084 struct files_struct *files)
1085{
1086 struct io_kiocb *req;
1087
Jens Axboe84965ff2021-01-23 15:51:11 -07001088 if (task && head->task != task) {
1089 /* in terms of cancelation, always match if req task is dead */
1090 if (head->task->flags & PF_EXITING)
1091 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001092 return false;
Jens Axboe84965ff2021-01-23 15:51:11 -07001093 }
Pavel Begunkov08d23632020-11-06 13:00:22 +00001094 if (!files)
1095 return true;
1096
1097 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001098 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001099 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001100 }
1101 return false;
1102}
1103
Jens Axboec40f6372020-06-25 15:39:59 -06001104static inline void req_set_fail_links(struct io_kiocb *req)
1105{
1106 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1107 req->flags |= REQ_F_FAIL_LINK;
1108}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001109
Jens Axboe2b188cc2019-01-07 10:46:33 -07001110static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1111{
1112 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1113
Jens Axboe0f158b42020-05-14 17:18:39 -06001114 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001115}
1116
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001117static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1118{
1119 return !req->timeout.off;
1120}
1121
Jens Axboe2b188cc2019-01-07 10:46:33 -07001122static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1123{
1124 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001125 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001126
1127 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1128 if (!ctx)
1129 return NULL;
1130
Jens Axboe78076bb2019-12-04 19:56:40 -07001131 /*
1132 * Use 5 bits less than the max cq entries, that should give us around
1133 * 32 entries per hash list if totally full and uniformly spread.
1134 */
1135 hash_bits = ilog2(p->cq_entries);
1136 hash_bits -= 5;
1137 if (hash_bits <= 0)
1138 hash_bits = 1;
1139 ctx->cancel_hash_bits = hash_bits;
1140 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1141 GFP_KERNEL);
1142 if (!ctx->cancel_hash)
1143 goto err;
1144 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1145
Roman Gushchin21482892019-05-07 10:01:48 -07001146 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001147 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1148 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001149
1150 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001151 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001152 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001153 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001154 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001155 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001156 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001157 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001158 mutex_init(&ctx->uring_lock);
1159 init_waitqueue_head(&ctx->wait);
1160 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001161 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001162 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001163 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -06001164 spin_lock_init(&ctx->inflight_lock);
1165 INIT_LIST_HEAD(&ctx->inflight_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001166 spin_lock_init(&ctx->rsrc_ref_lock);
1167 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001168 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1169 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001170 INIT_LIST_HEAD(&ctx->tctx_list);
Jens Axboe1b4c3512021-02-10 00:03:19 +00001171 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001172 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001173 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001174err:
Jens Axboe78076bb2019-12-04 19:56:40 -07001175 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001176 kfree(ctx);
1177 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001178}
1179
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001180static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001181{
Jens Axboe2bc99302020-07-09 09:43:27 -06001182 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1183 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001184
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001185 return seq != ctx->cached_cq_tail
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001186 + READ_ONCE(ctx->cached_cq_overflow);
Jens Axboe2bc99302020-07-09 09:43:27 -06001187 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001188
Bob Liu9d858b22019-11-13 18:06:25 +08001189 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001190}
1191
static void io_req_track_inflight(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;

		spin_lock_irq(&ctx->inflight_lock);
		list_add(&req->inflight_entry, &ctx->inflight_list);
		spin_unlock_irq(&ctx->inflight_lock);
	}
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->work.creds)
		req->work.creds = get_current_cred();

	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	io_for_each_link(cur, req)
		io_prep_async_work(cur);
}

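/*
 * Punt a request to the io-wq thread pool. ->work must already be set up
 * for the whole link chain (see io_prep_async_link() above); any linked
 * timeout is armed only after the work item has been enqueued.
 */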
static void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);
	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

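/*
 * Cancel a pending timeout. hrtimer_try_to_cancel() returns -1 only when
 * the timer callback is currently running, in which case the normal
 * timeout path owns the request and it must not be completed here.
 */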
static void io_kill_timeout(struct io_kiocb *req, int status)
{
	struct io_timeout_data *io = req->async_data;
	int ret;

	ret = hrtimer_try_to_cancel(&io->timer);
	if (ret != -1) {
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&req->timeout.list);
		io_cqring_fill_event(req, status);
		io_put_req_deferred(req, 1);
	}
}

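/*
 * Re-queue deferred (drained) requests in submission order, stopping at
 * the first entry whose drain condition is still unmet.
 */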
static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;

	if (list_empty(&ctx->timeout_list))
		return;

	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	do {
		u32 events_needed, events_got;
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req, 0);
	} while (!list_empty(&ctx->timeout_list));

	ctx->cq_last_tm_flush = seq;
}

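/*
 * The smp_store_release() below is what publishes freshly filled CQEs to
 * userspace. As a rough sketch of the matching consumer side (not kernel
 * code; field names are assumptions, loosely following liburing):
 *
 *	unsigned head = *cq_khead;
 *	unsigned tail = smp_load_acquire(cq_ktail);   <- pairs with below
 *	while (head != tail)
 *		handle(&cqes[head++ & cq_mask]);
 *	smp_store_release(cq_khead, head);
 */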
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);

	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);

	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (__io_cqring_events(ctx) == rings->cq_ring_entries)
		return NULL;

	tail = ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

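/*
 * Decide whether posting a CQE should also signal the registered eventfd.
 * With eventfd_async set (IORING_REGISTER_EVENTFD_ASYNC), only completions
 * posted from async (io-wq worker) context trigger the eventfd.
 */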
static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker();
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	/* see waitqueue_active() comment */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	/* see waitqueue_active() comment */
	smp_mb();

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->wait))
			wake_up(&ctx->wait);
	}
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
	if (waitqueue_active(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

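/*
 * Move as many overflowed completions as will fit into the CQ ring proper.
 * With 'force', entries that still do not fit are accounted in cq_overflow
 * and dropped instead of being kept on the backlog.
 */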
/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				       struct task_struct *tsk,
				       struct files_struct *files)
{
	struct io_rings *rings = ctx->rings;
	struct io_kiocb *req, *tmp;
	struct io_uring_cqe *cqe;
	unsigned long flags;
	bool all_flushed, posted;
	LIST_HEAD(list);

	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
		return false;

	posted = false;
	spin_lock_irqsave(&ctx->completion_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
		if (!io_match_task(req, tsk, files))
			continue;

		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		list_move(&req->compl.list, &list);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->compl.cflags);
		} else {
			ctx->cached_cq_overflow++;
			WRITE_ONCE(ctx->rings->cq_overflow,
				   ctx->cached_cq_overflow);
		}
		posted = true;
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);
	if (all_flushed) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
	}

	if (posted)
		io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	if (posted)
		io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, compl.list);
		list_del(&req->compl.list);
		io_put_req(req);
	}

	return all_flushed;
}

static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				     struct task_struct *tsk,
				     struct files_struct *files)
{
	bool ret = true;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/* iopoll syncs against uring_lock, not completion_lock */
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&ctx->uring_lock);
		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&ctx->uring_lock);
	}

	return ret;
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed ||
		   atomic_read(&req->task->io_uring->in_idle)) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * then we cannot store the request for later flushing, we need
		 * to drop it on the floor.
		 */
		ctx->cached_cq_overflow++;
		WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
			ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
		}
		io_clean_op(req);
		req->result = res;
		req->compl.cflags = cflags;
		refcount_inc(&req->refs);
		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

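/*
 * Post a completion under ->completion_lock. If this drops the last
 * reference, the request is dismantled and parked on the locked free_list
 * so the allocator can recycle it instead of going back to the slab.
 */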
static void io_req_complete_post(struct io_kiocb *req, long res,
				 unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (refcount_dec_and_test(&req->refs)) {
		struct io_comp_state *cs = &ctx->submit_state.comp;

		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_dismantle_req(req);
		io_put_task(req->task, 1);
		list_add(&req->compl.list, &cs->locked_free_list);
		cs->locked_free_nr++;
	} else {
		if (!percpu_ref_tryget(&ctx->refs))
			req = NULL;
	}
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (req) {
		io_cqring_ev_posted(ctx);
		percpu_ref_put(&ctx->refs);
	}
}

static void io_req_complete_state(struct io_kiocb *req, long res,
				  unsigned int cflags)
{
	io_clean_op(req);
	req->result = res;
	req->compl.cflags = cflags;
	req->flags |= REQ_F_COMPLETE_INLINE;
}

static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
				     long res, unsigned cflags)
{
	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
		io_req_complete_state(req, res, cflags);
	else
		io_req_complete_post(req, res, cflags);
}

static inline void io_req_complete(struct io_kiocb *req, long res)
{
	__io_req_complete(req, 0, res, 0);
}

static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_comp_state *cs = &state->comp;
	struct io_kiocb *req = NULL;

	/*
	 * If we have more than a batch's worth of requests in our IRQ side
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
		spin_lock_irq(&ctx->completion_lock);
		list_splice_init(&cs->locked_free_list, &cs->free_list);
		cs->locked_free_nr = 0;
		spin_unlock_irq(&ctx->completion_lock);
	}

	while (!list_empty(&cs->free_list)) {
		req = list_first_entry(&cs->free_list, struct io_kiocb,
					compl.list);
		list_del(&req->compl.list);
		state->reqs[state->free_reqs++] = req;
		if (state->free_reqs == ARRAY_SIZE(state->reqs))
			break;
	}

	return req != NULL;
}

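/*
 * Request allocation order: the submit-side cache (state->reqs), then
 * requests recycled from the completion side via io_flush_cached_reqs(),
 * then a bulk slab allocation, with a single slab allocation as the
 * final fallback.
 */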
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));

	if (!state->free_reqs) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
		int ret;

		if (io_flush_cached_reqs(ctx))
			goto got_req;

		ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
					    state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				return NULL;
			ret = 1;
		}
		state->free_reqs = ret;
	}
got_req:
	state->free_reqs--;
	return state->reqs[state->free_reqs];
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (!fixed)
		fput(file);
}

static void io_dismantle_req(struct io_kiocb *req)
{
	io_clean_op(req);

	if (req->async_data)
		kfree(req->async_data);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	if (req->fixed_rsrc_refs)
		percpu_ref_put(req->fixed_rsrc_refs);
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
		req->flags &= ~REQ_F_INFLIGHT;
	}
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_idle)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}

static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_dismantle_req(req);
	io_put_task(req->task, 1);

	kmem_cache_free(req_cachep, req);
	percpu_ref_put(&ctx->refs);
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

static bool io_kill_linked_timeout(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool cancelled = false;

	/*
	 * This can happen if a linked timeout fired and the link chain
	 * looked like: req -> link t-out -> link t-out [-> ...]
	 */
	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
		struct io_timeout_data *io = link->async_data;
		int ret;

		io_remove_next_linked(req);
		link->timeout.head = NULL;
		ret = hrtimer_try_to_cancel(&io->timer);
		if (ret != -1) {
			io_cqring_fill_event(link, -ECANCELED);
			io_put_req_deferred(link, 1);
			cancelled = true;
		}
	}
	req->flags &= ~REQ_F_LINK_TIMEOUT;
	return cancelled;
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *nxt, *link = req->link;

	req->link = NULL;
	while (link) {
		nxt = link->link;
		link->link = NULL;

		trace_io_uring_fail_link(req, link);
		io_cqring_fill_event(link, -ECANCELED);
		io_put_req_deferred(link, 2);
		link = nxt;
	}
}

static bool io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	bool posted = false;

	if (likely(req->flags & REQ_F_LINK_TIMEOUT))
		posted = io_kill_linked_timeout(req);
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		posted |= (req->link != NULL);
		io_fail_links(req);
	}
	return posted;
}

static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;
		bool posted;

		spin_lock_irqsave(&ctx->completion_lock, flags);
		posted = io_disarm_next(req);
		if (posted)
			io_commit_cqring(req->ctx);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
		if (posted)
			io_cqring_ev_posted(ctx);
	}
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
		return NULL;
	return __io_req_find_next(req);
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx)
{
	if (!ctx)
		return;
	if (ctx->submit_state.comp.nr) {
		mutex_lock(&ctx->uring_lock);
		io_submit_flush_completions(&ctx->submit_state.comp, ctx);
		mutex_unlock(&ctx->uring_lock);
	}
	percpu_ref_put(&ctx->refs);
}

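/*
 * Run one batch of per-task task_work. The list is spliced out under
 * ->task_lock and then walked lock-free; completions are flushed and the
 * ctx reference dropped each time the batch crosses into a different ring.
 */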
static bool __tctx_task_work(struct io_uring_task *tctx)
{
	struct io_ring_ctx *ctx = NULL;
	struct io_wq_work_list list;
	struct io_wq_work_node *node;

	if (wq_list_empty(&tctx->task_list))
		return false;

	spin_lock_irq(&tctx->task_lock);
	list = tctx->task_list;
	INIT_WQ_LIST(&tctx->task_list);
	spin_unlock_irq(&tctx->task_lock);

	node = list.first;
	while (node) {
		struct io_wq_work_node *next = node->next;
		struct io_kiocb *req;

		req = container_of(node, struct io_kiocb, io_task_work.node);
		if (req->ctx != ctx) {
			ctx_flush_and_put(ctx);
			ctx = req->ctx;
			percpu_ref_get(&ctx->refs);
		}

		req->task_work.func(&req->task_work);
		node = next;
	}

	ctx_flush_and_put(ctx);
	return list.first != NULL;
}

static void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);

	clear_bit(0, &tctx->task_state);

	while (__tctx_task_work(tctx))
		cond_resched();
}

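/*
 * Queue a request on the task's task_work list. The ->task_state bit
 * ensures that at most one task_work_add() callback is outstanding per
 * task, however many requests are queued behind it.
 */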
static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
			    enum task_work_notify_mode notify)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_wq_work_node *node, *prev;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(!tctx);

	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
	spin_unlock_irqrestore(&tctx->task_lock, flags);

	/* task_work already pending, we're done */
	if (test_bit(0, &tctx->task_state) ||
	    test_and_set_bit(0, &tctx->task_state))
		return 0;

	if (!task_work_add(tsk, &tctx->task_work, notify))
		return 0;

	/*
	 * Slow path - we failed, find and delete work. If the work is not
	 * in the list, it got run and we're fine.
	 */
	ret = 0;
	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_for_each(node, prev, &tctx->task_list) {
		if (&req->io_task_work.node == node) {
			wq_list_del(&tctx->task_list, node, prev);
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&tctx->task_lock, flags);
	clear_bit(0, &tctx->task_state);
	return ret;
}

static int io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_ring_ctx *ctx = req->ctx;
	enum task_work_notify_mode notify;
	int ret;

	if (tsk->flags & PF_EXITING)
		return -ESRCH;

	/*
	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
	 * processing task_work. There's no reliable way to tell if TWA_RESUME
	 * will do the job.
	 */
	notify = TWA_NONE;
	if (!(ctx->flags & IORING_SETUP_SQPOLL))
		notify = TWA_SIGNAL;

	ret = io_task_work_add(tsk, req, notify);
	if (!ret)
		wake_up_process(tsk);

	return ret;
}

static bool io_run_task_work_head(struct callback_head **work_head)
{
	struct callback_head *work, *next;
	bool executed = false;

	do {
		work = xchg(work_head, NULL);
		if (!work)
			break;

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
		executed = true;
	} while (1);

	return executed;
}

static void io_task_work_add_head(struct callback_head **work_head,
				  struct callback_head *task_work)
{
	struct callback_head *head;

	do {
		head = READ_ONCE(*work_head);
		task_work->next = head;
	} while (cmpxchg(work_head, head, task_work) != head);
}

static void io_req_task_work_add_fallback(struct io_kiocb *req,
					  task_work_func_t cb)
{
	init_task_work(&req->task_work, cb);
	io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
}

static void __io_req_task_cancel(struct io_kiocb *req, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	io_cqring_fill_event(req, error);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_double_put_req(req);
}

static void io_req_task_cancel(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;

	/* ctx is guaranteed to stay alive while we hold uring_lock */
	mutex_lock(&ctx->uring_lock);
	__io_req_task_cancel(req, req->result);
	mutex_unlock(&ctx->uring_lock);
}

static void __io_req_task_submit(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
	mutex_lock(&ctx->uring_lock);
	if (!(current->flags & PF_EXITING) && !current->in_execve)
		__io_queue_sqe(req);
	else
		__io_req_task_cancel(req, -EFAULT);
	mutex_unlock(&ctx->uring_lock);
}

static void io_req_task_submit(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	__io_req_task_submit(req);
}

static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	req->result = ret;
	req->task_work.func = io_req_task_cancel;

	if (unlikely(io_req_task_work_add(req)))
		io_req_task_work_add_fallback(req, io_req_task_cancel);
}

static void io_req_task_queue(struct io_kiocb *req)
{
	req->task_work.func = io_req_task_submit;

	if (unlikely(io_req_task_work_add(req)))
		io_req_task_queue_fail(req, -ECANCELED);
}

static inline void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_req(struct io_kiocb *req)
{
	io_queue_next(req);
	__io_free_req(req);
}

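/*
 * Batches the per-task inflight accounting and ctx refcount drops done
 * when freeing many requests at once, so each counter is touched once per
 * batch instead of once per request.
 */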
struct req_batch {
	struct task_struct *task;
	int task_refs;
	int ctx_refs;
};

static inline void io_init_req_batch(struct req_batch *rb)
{
	rb->task_refs = 0;
	rb->ctx_refs = 0;
	rb->task = NULL;
}

static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
				     struct req_batch *rb)
{
	if (rb->task)
		io_put_task(rb->task, rb->task_refs);
	if (rb->ctx_refs)
		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
}

static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
			      struct io_submit_state *state)
{
	io_queue_next(req);

	if (req->task != rb->task) {
		if (rb->task)
			io_put_task(rb->task, rb->task_refs);
		rb->task = req->task;
		rb->task_refs = 0;
	}
	rb->task_refs++;
	rb->ctx_refs++;

	io_dismantle_req(req);
	if (state->free_reqs != ARRAY_SIZE(state->reqs))
		state->reqs[state->free_reqs++] = req;
	else
		list_add(&req->compl.list, &state->comp.free_list);
}

static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx)
{
	int i, nr = cs->nr;
	struct io_kiocb *req;
	struct req_batch rb;

	io_init_req_batch(&rb);
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];
		__io_cqring_fill_event(req, req->result, req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];

		/* submission and completion refs */
		if (refcount_sub_and_test(2, &req->refs))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_req_free_batch_finish(ctx, &rb);
	cs->nr = 0;
}

/*
 * Drop a reference to the request; if this was the last reference, free it
 * and return the next request in the chain (if there is one).
 */
static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	if (refcount_dec_and_test(&req->refs)) {
		nxt = io_req_find_next(req);
		__io_free_req(req);
	}
	return nxt;
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_put_req_deferred_cb(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	io_free_req(req);
}

static void io_free_req_deferred(struct io_kiocb *req)
{
	int ret;

	req->task_work.func = io_put_req_deferred_cb;
	ret = io_req_task_work_add(req);
	if (unlikely(ret))
		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
}

static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
{
	if (refcount_sub_and_test(refs, &req->refs))
		io_free_req_deferred(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
{
	unsigned int cflags;

	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(kbuf);
	return cflags;
}

static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	return io_put_kbuf(req, kbuf);
}

static inline bool io_run_task_work(void)
{
	/*
	 * Not safe to run on exiting task, and the task_work handling will
	 * not add work to such a task.
	 */
	if (unlikely(current->flags & PF_EXITING))
		return false;
	if (current->task_works) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return true;
	}

	return false;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;

	/* order with ->result store in io_complete_rw_iopoll() */
	smp_rmb();

	io_init_req_batch(&rb);
	while (!list_empty(done)) {
		int cflags = 0;

		req = list_first_entry(done, struct io_kiocb, inflight_entry);
		list_del(&req->inflight_entry);

		if (READ_ONCE(req->result) == -EAGAIN) {
			req->iopoll_completed = 0;
			if (io_rw_reissue(req))
				continue;
		}

		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_rw_kbuf(req);

		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	io_req_free_batch_finish(ctx, &rb);
}

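/*
 * One pass of the IOPOLL loop: poll each inflight iocb at most once, move
 * everything that has completed to a local 'done' list, and then complete
 * that list in a single batch.
 */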
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
		struct kiocb *kiocb = &req->rw.kiocb;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed)) {
			list_move_tail(&req->inflight_entry, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		/* iopoll may have completed current req */
		if (READ_ONCE(req->iopoll_completed))
			list_move_tail(&req->inflight_entry, &done);

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       long min)
{
	while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (*nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->iopoll_list)) {
		unsigned int nr_events = 0;

		io_do_iopoll(ctx, &nr_events, 0);

		/* let it sleep and repeat later if can't complete a request */
		if (nr_events == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		/*
		 * Don't enter the poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (e.g. in error).
		 */
		if (test_bit(0, &ctx->cq_check_overflow))
			__io_cqring_overflow_flush(ctx, false, NULL, NULL);
		if (io_cqring_events(ctx))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			io_run_task_work();
			mutex_lock(&ctx->uring_lock);
		}

		ret = io_iopoll_getevents(ctx, &nr_events, min);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}
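
/*
 * Informative sketch, not part of this file: roughly how userspace drives
 * the IOPOLL reaping above. This is a hedged, liburing-style example; it
 * assumes an O_DIRECT-capable file and a liburing providing these helpers:
 *
 *	struct io_uring ring;
 *	struct io_uring_cqe *cqe;
 *	void *buf;
 *
 *	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *	int fd = open("data", O_RDONLY | O_DIRECT);
 *	posix_memalign(&buf, 4096, 4096);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, 4096, 0);
 *	io_uring_submit(&ring);
 *	// the wait enters the kernel with IORING_ENTER_GETEVENTS, which
 *	// reaps completions via io_iopoll_check() instead of sleeping
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 */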

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	int rw, ret;
	struct iov_iter iter;

	/* already prepared */
	if (req->async_data)
		return true;

	switch (req->opcode) {
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		rw = READ;
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		rw = WRITE;
		break;
	default:
		printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
				req->opcode);
		return false;
	}

	ret = io_import_iovec(rw, req, &iovec, &iter, false);
	if (ret < 0)
		return false;
	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	return true;
}
#else
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static bool io_rw_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	if (!io_rw_should_reissue(req))
		return false;

	lockdep_assert_held(&req->ctx->uring_lock);

	if (io_resubmit_prep(req)) {
		refcount_inc(&req->refs);
		io_queue_async_work(req);
		return true;
	}
	req_set_fail_links(req);
#endif
	return false;
}

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     unsigned int issue_flags)
{
	int cflags = 0;

	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE;
		return;
	}
	if (res != req->result)
		req_set_fail_links(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_rw_kbuf(req);
	__io_req_complete(req, issue_flags, res, cflags);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	__io_complete_rw(req, res, res2, 0);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

#ifdef CONFIG_BLOCK
	/* Rewind iter, if we have one. iopoll path resubmits as usual */
	if (res == -EAGAIN && io_rw_should_reissue(req)) {
		struct io_async_rw *rw = req->async_data;

		if (rw)
			iov_iter_revert(&rw->iter,
					req->result - iov_iter_count(&rw->iter));
		else if (!io_resubmit_prep(req))
			res = -EIO;
	}
#endif

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != -EAGAIN && res != req->result)
		req_set_fail_links(req);

	WRITE_ONCE(req->result, res);
	/* order with io_poll_complete() checking ->result */
	smp_wmb();
	WRITE_ONCE(req->iopoll_completed, 1);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
						inflight_entry);
		if (list_req->file != req->file)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		list_add(&req->inflight_entry, &ctx->iopoll_list);
	else
		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

	/*
	 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq
	 * thread task context or in io worker task context. If the current
	 * task context is the sq thread, we don't need to check whether we
	 * should wake up the sq thread.
	 */
	if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
	    wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
}

static inline void io_state_file_put(struct io_submit_state *state)
{
	if (state->file_refs) {
		fput_many(state->file, state->file_refs);
		state->file_refs = 0;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *__io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file_refs) {
		if (state->fd == fd) {
			state->file_refs--;
			return state->file;
		}
		io_state_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (unlikely(!state->file))
		return NULL;

	state->fd = fd;
	state->file_refs = state->ios_left - 1;
	return state->file;
}

static bool io_bdev_nowait(struct block_device *bdev)
{
	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file, int rw)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
			return true;
		return false;
	}
	if (S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
		    file->f_op != &io_uring_fops)
			return true;
		return false;
	}

	/* any ->read/write should understand O_NONBLOCK */
	if (file->f_flags & O_NONBLOCK)
		return true;

	if (!(file->f_mode & FMODE_NOWAIT))
		return false;

	if (rw == READ)
		return file->f_op->read_iter != NULL;

	return file->f_op->write_iter != NULL;
}
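
/*
 * Informative examples of the checks above: a socket or char device is
 * always considered async-capable; a block device qualifies only if its
 * queue advertises nowait support; a regular file qualifies only if the
 * underlying block device does (and it isn't an io_uring fd itself);
 * anything else, e.g. a FIFO, needs O_NONBLOCK or FMODE_NOWAIT. A file
 * that fails here has force_nonblock attempts punted to io-wq workers
 * instead of being issued inline.
 */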

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	req->buf_index = READ_ONCE(sqe->buf_index);
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
	struct io_async_rw *io = req->async_data;
	bool check_reissue = kiocb->ki_complete == io_complete_rw;

	/* add previously done IO, if any */
	if (io && io->bytes_done > 0) {
		if (ret < 0)
			ret = io->bytes_done;
		else
			ret += io->bytes_done;
	}

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
		__io_complete_rw(req, ret, 0, issue_flags);
	else
		io_rw_done(kiocb, ret);

	if (check_reissue && req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (!io_rw_reissue(req)) {
			int cflags = 0;

			req_set_fail_links(req);
			if (req->flags & REQ_F_BUFFER_SELECTED)
				cflags = io_put_rw_kbuf(req);
			__io_req_complete(req, issue_flags, ret, cflags);
		}
	}
}

static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	u16 index, buf_index = req->buf_index;
	size_t offset;
	u64 buf_addr;

	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or covers the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
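
/*
 * Worked example for the offset fast path above (informative): assume
 * PAGE_SIZE == 4096, a registered buffer whose first bvec is 2048 bytes
 * (unaligned head) followed by full 4096-byte pages, and offset == 10240.
 * Then offset > bvec->bv_len, so:
 *
 *	offset  -= 2048;                      // 8192 left to skip
 *	seg_skip = 1 + (8192 >> PAGE_SHIFT);  // 1 + 2 = 3 segments
 *	iter->iov_offset = 8192 & ~PAGE_MASK; // 0, page aligned
 *
 * i.e. the iterator starts at bvec[3] with no intra-page offset, without
 * walking the skipped segments one by one as iov_iter_advance() would.
 */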

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = xa_load(&req->ctx->io_buffers, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
							list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			xa_erase(&req->ctx->io_buffers, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}

static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
					bool needs_lock)
{
	struct io_buffer *kbuf;
	u16 bgid;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	bgid = req->buf_index;
	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;
	req->rw.addr = (u64) (unsigned long) kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return u64_to_user_ptr(kbuf->addr);
}
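
/*
 * Illustrative sketch (userspace, liburing-style; not part of this file):
 * how a ring provides a buffer group and lets the kernel pick from it via
 * the selection path above. The bgid/bid values are arbitrary.
 *
 *	char bufs[4][4096];
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 4, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;	// group id 1, registered above
 *	io_uring_submit(&ring);
 *	// on completion, IORING_CQE_F_BUFFER is set in cqe->flags and
 *	// cqe->flags >> IORING_CQE_BUFFER_SHIFT is the chosen buffer id
 */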

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				bool needs_lock)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	ssize_t len;

	uiov = u64_to_user_ptr(req->rw.addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      bool needs_lock)
{
	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    bool needs_lock)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		struct io_buffer *kbuf;

		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov[0].iov_len = kbuf->len;
		return 0;
	}
	if (req->rw.len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, needs_lock);
#endif

	return __io_iov_buffer_select(req, iov, needs_lock);
}

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	u8 opcode = req->opcode;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (req->flags & REQ_F_BUFFER_SELECT) {
			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
			if (IS_ERR(buf))
				return PTR_ERR(buf);
			req->rw.len = sqe_len;
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, *iovec, needs_lock);
		if (!ret)
			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
			      req->ctx->compat);
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
{
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
			iovec.iov_len = req->rw.len;
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, io_kiocb_ppos(kiocb));
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, io_kiocb_ppos(kiocb));
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		req->rw.len -= nr;
		req->rw.addr += nr;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *rw = req->async_data;

	memcpy(&rw->iter, iter, sizeof(*iter));
	rw->free_iovec = iovec;
	rw->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		rw->iter.iov = rw->fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			rw->iter.iov += iov_off;
		}
		if (rw->fast_iov != fast_iov)
			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static inline int __io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
	return req->async_data == NULL;
}

static int io_alloc_async_data(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].needs_async_data)
		return 0;

	return __io_alloc_async_data(req);
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force)
{
	if (!force && !io_op_defs[req->opcode].needs_async_data)
		return 0;
	if (!req->async_data) {
		if (__io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, fast_iov, iter);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov = iorw->fast_iov;
	int ret;

	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;
	return io_prep_rw(req, sqe);
}

/*
 * This is our waitqueue callback handler, registered through lock_page_async()
 * when we initially tried to do the IO with the iocb that armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);

	/* submit ref gets dropped, acquire a new one */
	refcount_inc(&req->refs);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;
	struct wait_page_queue *wait = &rw->wpq;
	struct kiocb *kiocb = &req->rw.kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}
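
/*
 * Informative sketch of the contract set up above: with IOCB_WAITQ set
 * and kiocb->ki_waitq pointing at our entry, the buffered read path
 * (e.g. generic_file_buffered_read()) returns -EIOCBQUEUED instead of
 * sleeping on a locked page; once the page is unlocked,
 * io_async_buf_func() runs and queues task_work that re-issues the read
 * from io_read()'s retry loop.
 */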
3252
3253static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3254{
3255 if (req->file->f_op->read_iter)
3256 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003257 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003258 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003259 else
3260 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003261}
3262
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t io_size, ret, ret2;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, READ)) {
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
		return ret ?: -EAGAIN;
	}

	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(req, iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		goto out_free;
	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	rw = req->async_data;
	/* now use our persistent iterator, if we aren't already */
	iter = &rw->iter;

	do {
		io_size -= ret;
		rw->bytes_done += ret;
		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(req, iter);
		if (ret == -EIOCBQUEUED)
			return 0;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
	} while (ret > 0 && ret < io_size);
done:
	kiocb_done(kiocb, ret, issue_flags);
out_free:
	/* it's faster to check here than to delegate to kfree() */
	if (iovec)
		kfree(iovec);
	return 0;
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;
	return io_prep_rw(req, sqe);
}

static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t ret, ret2, io_size;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (req->file->f_op->write_iter)
		ret2 = call_write_iter(req->file, kiocb, iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
			goto copy_iov;
done:
		kiocb_done(kiocb, ret2, issue_flags);
	} else {
copy_iov:
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
		return ret ?: -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static int io_renameat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = &req->rename;
	const char __user *oldf, *newf;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rename *ren = &req->rename;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_unlinkat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = &req->unlink;
	const char __user *fname;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_unlink *un = &req->unlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

Jens Axboe36f4fa62020-09-05 11:14:22 -06003553static int io_shutdown_prep(struct io_kiocb *req,
3554 const struct io_uring_sqe *sqe)
3555{
3556#if defined(CONFIG_NET)
3557 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3558 return -EINVAL;
3559 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3560 sqe->buf_index)
3561 return -EINVAL;
3562
3563 req->shutdown.how = READ_ONCE(sqe->len);
3564 return 0;
3565#else
3566 return -EOPNOTSUPP;
3567#endif
3568}
3569
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003570static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003571{
3572#if defined(CONFIG_NET)
3573 struct socket *sock;
3574 int ret;
3575
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003576 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003577 return -EAGAIN;
3578
Linus Torvalds48aba792020-12-16 12:44:05 -08003579 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003580 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003581 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003582
3583 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003584 if (ret < 0)
3585 req_set_fail_links(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003586 io_req_complete(req, ret);
3587 return 0;
3588#else
3589 return -EOPNOTSUPP;
3590#endif
3591}
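
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_SHUTDOWN via liburing, assuming io_uring_prep_shutdown() is
 * available; 'sockfd' is a hypothetical connected socket.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 */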
3592
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003593static int __io_splice_prep(struct io_kiocb *req,
3594 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003595{
3596 struct io_splice* sp = &req->splice;
3597 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003598
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003599 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3600 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003601
3602 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003603 sp->len = READ_ONCE(sqe->len);
3604 sp->flags = READ_ONCE(sqe->splice_flags);
3605
3606 if (unlikely(sp->flags & ~valid_flags))
3607 return -EINVAL;
3608
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003609 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3610 (sp->flags & SPLICE_F_FD_IN_FIXED));
3611 if (!sp->file_in)
3612 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003613 req->flags |= REQ_F_NEED_CLEANUP;
3614
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003615 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3616 /*
3617	 * The splice operation will be punted async, and we need to
3618	 * modify io_wq_work.flags here, so initialize io_wq_work first.
3619 */
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003620 req->work.flags |= IO_WQ_WORK_UNBOUND;
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003621 }
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003622
3623 return 0;
3624}
3625
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003626static int io_tee_prep(struct io_kiocb *req,
3627 const struct io_uring_sqe *sqe)
3628{
3629 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3630 return -EINVAL;
3631 return __io_splice_prep(req, sqe);
3632}
3633
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003634static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003635{
3636 struct io_splice *sp = &req->splice;
3637 struct file *in = sp->file_in;
3638 struct file *out = sp->file_out;
3639 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3640 long ret = 0;
3641
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003642 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003643 return -EAGAIN;
3644 if (sp->len)
3645 ret = do_tee(in, out, sp->len, flags);
3646
3647 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3648 req->flags &= ~REQ_F_NEED_CLEANUP;
3649
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003650 if (ret != sp->len)
3651 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003652 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003653 return 0;
3654}
3655
3656static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3657{
3658	struct io_splice *sp = &req->splice;
3659
3660 sp->off_in = READ_ONCE(sqe->splice_off_in);
3661 sp->off_out = READ_ONCE(sqe->off);
3662 return __io_splice_prep(req, sqe);
3663}
3664
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003665static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003666{
3667 struct io_splice *sp = &req->splice;
3668 struct file *in = sp->file_in;
3669 struct file *out = sp->file_out;
3670 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3671 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003672 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003673
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003674 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003675 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003676
3677 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3678 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003679
Jens Axboe948a7742020-05-17 14:21:38 -06003680 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003681 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003682
3683 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3684 req->flags &= ~REQ_F_NEED_CLEANUP;
3685
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003686 if (ret != sp->len)
3687 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003688 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003689 return 0;
3690}
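
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_SPLICE via liburing; an offset of -1 selects the file's
 * current position, matching the NULL-offset handling above. 'filefd'
 * and 'pipefd' are hypothetical descriptors.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_splice(sqe, filefd, 0, pipefd[1], -1, 4096, 0);
 *	io_uring_submit(&ring);
 */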
3691
Jens Axboe2b188cc2019-01-07 10:46:33 -07003692/*
3693 * IORING_OP_NOP just posts a completion event, nothing else.
3694 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003695static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003696{
3697 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003698
Jens Axboedef596e2019-01-09 08:59:42 -07003699 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3700 return -EINVAL;
3701
Pavel Begunkov889fca72021-02-10 00:03:09 +00003702 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003703 return 0;
3704}
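
/*
 * Example (not part of the kernel source): the no-op is handy for testing
 * ring setup and the completion path from userspace. A liburing sketch:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *
 * The completion's cqe->res will be 0 on success.
 */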
3705
Pavel Begunkov1155c762021-02-18 18:29:38 +00003706static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003707{
Jens Axboe6b063142019-01-10 22:13:58 -07003708 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003709
Jens Axboe09bb8392019-03-13 12:39:28 -06003710 if (!req->file)
3711 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003712
Jens Axboe6b063142019-01-10 22:13:58 -07003713 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003714 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003715 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003716 return -EINVAL;
3717
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003718 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3719 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3720 return -EINVAL;
3721
3722 req->sync.off = READ_ONCE(sqe->off);
3723 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003724 return 0;
3725}
3726
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003727static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003728{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003729 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003730 int ret;
3731
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003732 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003733 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003734 return -EAGAIN;
3735
Jens Axboe9adbd452019-12-20 08:45:55 -07003736 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003737 end > 0 ? end : LLONG_MAX,
3738 req->sync.flags & IORING_FSYNC_DATASYNC);
3739 if (ret < 0)
3740 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003741 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003742 return 0;
3743}
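
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_FSYNC via liburing; IORING_FSYNC_DATASYNC requests
 * fdatasync() semantics, matching the flag handling above.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */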
3744
Jens Axboed63d1b52019-12-10 10:38:56 -07003745static int io_fallocate_prep(struct io_kiocb *req,
3746 const struct io_uring_sqe *sqe)
3747{
3748 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3749 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003750 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3751 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003752
3753 req->sync.off = READ_ONCE(sqe->off);
3754 req->sync.len = READ_ONCE(sqe->addr);
3755 req->sync.mode = READ_ONCE(sqe->len);
3756 return 0;
3757}
3758
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003759static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003760{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003761 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003762
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003763	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003764 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003765 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003766 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3767 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003768 if (ret < 0)
3769 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003770 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003771 return 0;
3772}
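
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_FALLOCATE via liburing; this preallocates 1MB at the start
 * of 'fd'. Note that sqe->addr carries the length and sqe->len the mode,
 * as read back in io_fallocate_prep() above.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1024 * 1024);
 *	io_uring_submit(&ring);
 */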
3773
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003774static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003775{
Jens Axboef8748882020-01-08 17:47:02 -07003776 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003777 int ret;
3778
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003779 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003780 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003781 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003782 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003783
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003784	/* open.how should already be initialised */
3785 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003786 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003787
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003788 req->open.dfd = READ_ONCE(sqe->fd);
3789 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003790 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003791 if (IS_ERR(req->open.filename)) {
3792 ret = PTR_ERR(req->open.filename);
3793 req->open.filename = NULL;
3794 return ret;
3795 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003796 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003797 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003798 return 0;
3799}
3800
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003801static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3802{
3803 u64 flags, mode;
3804
Jens Axboe14587a462020-09-05 11:36:08 -06003805 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003806 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003807 mode = READ_ONCE(sqe->len);
3808 flags = READ_ONCE(sqe->open_flags);
3809 req->open.how = build_open_how(flags, mode);
3810 return __io_openat_prep(req, sqe);
3811}
3812
Jens Axboecebdb982020-01-08 17:59:24 -07003813static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3814{
3815 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003816 size_t len;
3817 int ret;
3818
Jens Axboe14587a462020-09-05 11:36:08 -06003819 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003820 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003821 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3822 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003823 if (len < OPEN_HOW_SIZE_VER0)
3824 return -EINVAL;
3825
3826 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3827 len);
3828 if (ret)
3829 return ret;
3830
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003831 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003832}
3833
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003834static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003835{
3836 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003837 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003838 bool nonblock_set;
3839 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003840 int ret;
3841
Jens Axboecebdb982020-01-08 17:59:24 -07003842 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003843 if (ret)
3844 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003845 nonblock_set = op.open_flag & O_NONBLOCK;
3846 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003847 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003848 /*
3849 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3850	 * it'll always return -EAGAIN
3851 */
3852 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3853 return -EAGAIN;
3854 op.lookup_flags |= LOOKUP_CACHED;
3855 op.open_flag |= O_NONBLOCK;
3856 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003857
Jens Axboe4022e7a2020-03-19 19:23:18 -06003858 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003859 if (ret < 0)
3860 goto err;
3861
3862 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Jens Axboe3a81fd02020-12-10 12:25:36 -07003863 /* only retry if RESOLVE_CACHED wasn't already set by application */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003864 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
3865 file == ERR_PTR(-EAGAIN)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003866 /*
3867	 * We could hang on to this 'fd', but it seems like marginal
3868 * gain for something that is now known to be a slower path.
3869 * So just put it, and we'll get a new one when we retry.
3870 */
3871 put_unused_fd(ret);
3872 return -EAGAIN;
3873 }
3874
Jens Axboe15b71ab2019-12-11 11:20:36 -07003875 if (IS_ERR(file)) {
3876 put_unused_fd(ret);
3877 ret = PTR_ERR(file);
3878 } else {
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003879 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
Jens Axboe3a81fd02020-12-10 12:25:36 -07003880 file->f_flags &= ~O_NONBLOCK;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003881 fsnotify_open(file);
3882 fd_install(ret, file);
3883 }
3884err:
3885 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003886 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003887 if (ret < 0)
3888 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003889 io_req_complete(req, ret);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003890 return 0;
3891}
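
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_OPENAT2 via liburing; setting RESOLVE_CACHED makes the open
 * fail fast with -EAGAIN instead of being punted when the dcache can't
 * satisfy it, per the retry logic above.
 *
 *	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.txt", &how);
 *	io_uring_submit(&ring);
 */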
3892
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003893static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003894{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003895 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003896}
3897
Jens Axboe067524e2020-03-02 16:32:28 -07003898static int io_remove_buffers_prep(struct io_kiocb *req,
3899 const struct io_uring_sqe *sqe)
3900{
3901 struct io_provide_buf *p = &req->pbuf;
3902 u64 tmp;
3903
3904 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3905 return -EINVAL;
3906
3907 tmp = READ_ONCE(sqe->fd);
3908 if (!tmp || tmp > USHRT_MAX)
3909 return -EINVAL;
3910
3911 memset(p, 0, sizeof(*p));
3912 p->nbufs = tmp;
3913 p->bgid = READ_ONCE(sqe->buf_group);
3914 return 0;
3915}
3916
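/*
 * Remove up to @nbufs buffers from the group list headed by @buf, freeing
 * each one. If the list is fully drained, the head itself is freed and
 * group @bgid is erased from the ctx buffer xarray. Returns the number of
 * buffers removed.
 */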
3917static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3918 int bgid, unsigned nbufs)
3919{
3920 unsigned i = 0;
3921
3922 /* shouldn't happen */
3923 if (!nbufs)
3924 return 0;
3925
3926 /* the head kbuf is the list itself */
3927 while (!list_empty(&buf->list)) {
3928 struct io_buffer *nxt;
3929
3930 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3931 list_del(&nxt->list);
3932 kfree(nxt);
3933 if (++i == nbufs)
3934 return i;
3935 }
3936 i++;
3937 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003938 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003939
3940 return i;
3941}
3942
Pavel Begunkov889fca72021-02-10 00:03:09 +00003943static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003944{
3945 struct io_provide_buf *p = &req->pbuf;
3946 struct io_ring_ctx *ctx = req->ctx;
3947 struct io_buffer *head;
3948 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003949 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003950
3951 io_ring_submit_lock(ctx, !force_nonblock);
3952
3953 lockdep_assert_held(&ctx->uring_lock);
3954
3955 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003956 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003957 if (head)
3958 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003959 if (ret < 0)
3960 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003961
3962 /* need to hold the lock to complete IOPOLL requests */
3963 if (ctx->flags & IORING_SETUP_IOPOLL) {
Pavel Begunkov889fca72021-02-10 00:03:09 +00003964 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003965 io_ring_submit_unlock(ctx, !force_nonblock);
3966 } else {
3967 io_ring_submit_unlock(ctx, !force_nonblock);
Pavel Begunkov889fca72021-02-10 00:03:09 +00003968 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003969 }
Jens Axboe067524e2020-03-02 16:32:28 -07003970 return 0;
3971}
3972
Jens Axboeddf0322d2020-02-23 16:41:33 -07003973static int io_provide_buffers_prep(struct io_kiocb *req,
3974 const struct io_uring_sqe *sqe)
3975{
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003976 unsigned long size;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003977 struct io_provide_buf *p = &req->pbuf;
3978 u64 tmp;
3979
3980 if (sqe->ioprio || sqe->rw_flags)
3981 return -EINVAL;
3982
3983 tmp = READ_ONCE(sqe->fd);
3984 if (!tmp || tmp > USHRT_MAX)
3985 return -E2BIG;
3986 p->nbufs = tmp;
3987 p->addr = READ_ONCE(sqe->addr);
3988 p->len = READ_ONCE(sqe->len);
3989
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003990 size = (unsigned long)p->len * p->nbufs;
3991 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003992 return -EFAULT;
3993
3994 p->bgid = READ_ONCE(sqe->buf_group);
3995 tmp = READ_ONCE(sqe->off);
3996 if (tmp > USHRT_MAX)
3997 return -E2BIG;
3998 p->bid = tmp;
3999 return 0;
4000}
4001
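/*
 * Allocate @pbuf->nbufs buffer descriptors with consecutive buffer IDs
 * starting at @pbuf->bid and link them onto @head, creating the list if
 * needed. Returns the number of buffers added, or -ENOMEM if none could
 * be allocated.
 */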
4002static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4003{
4004 struct io_buffer *buf;
4005 u64 addr = pbuf->addr;
4006 int i, bid = pbuf->bid;
4007
4008 for (i = 0; i < pbuf->nbufs; i++) {
4009 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4010 if (!buf)
4011 break;
4012
4013 buf->addr = addr;
4014 buf->len = pbuf->len;
4015 buf->bid = bid;
4016 addr += pbuf->len;
4017 bid++;
4018 if (!*head) {
4019 INIT_LIST_HEAD(&buf->list);
4020 *head = buf;
4021 } else {
4022 list_add_tail(&buf->list, &(*head)->list);
4023 }
4024 }
4025
4026 return i ? i : -ENOMEM;
4027}
4028
Pavel Begunkov889fca72021-02-10 00:03:09 +00004029static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004030{
4031 struct io_provide_buf *p = &req->pbuf;
4032 struct io_ring_ctx *ctx = req->ctx;
4033 struct io_buffer *head, *list;
4034 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004035 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004036
4037 io_ring_submit_lock(ctx, !force_nonblock);
4038
4039 lockdep_assert_held(&ctx->uring_lock);
4040
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004041 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004042
4043 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004044 if (ret >= 0 && !list) {
4045 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4046 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004047 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004048 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004049 if (ret < 0)
4050 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004051
4052 /* need to hold the lock to complete IOPOLL requests */
4053 if (ctx->flags & IORING_SETUP_IOPOLL) {
Pavel Begunkov889fca72021-02-10 00:03:09 +00004054 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004055 io_ring_submit_unlock(ctx, !force_nonblock);
4056 } else {
4057 io_ring_submit_unlock(ctx, !force_nonblock);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004058 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004059 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004060 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004061}
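
/*
 * Example (not part of the kernel source): userspace sketch of publishing
 * 8 buffers of 4KB each as buffer group 1, IDs starting at 0, via
 * liburing; 'base' is assumed to point at 8 * 4096 bytes of application
 * memory.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, base, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 */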
4062
Jens Axboe3e4827b2020-01-08 15:18:09 -07004063static int io_epoll_ctl_prep(struct io_kiocb *req,
4064 const struct io_uring_sqe *sqe)
4065{
4066#if defined(CONFIG_EPOLL)
4067 if (sqe->ioprio || sqe->buf_index)
4068 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004069 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004070 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004071
4072 req->epoll.epfd = READ_ONCE(sqe->fd);
4073 req->epoll.op = READ_ONCE(sqe->len);
4074 req->epoll.fd = READ_ONCE(sqe->off);
4075
4076 if (ep_op_has_event(req->epoll.op)) {
4077 struct epoll_event __user *ev;
4078
4079 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4080 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4081 return -EFAULT;
4082 }
4083
4084 return 0;
4085#else
4086 return -EOPNOTSUPP;
4087#endif
4088}
4089
Pavel Begunkov889fca72021-02-10 00:03:09 +00004090static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004091{
4092#if defined(CONFIG_EPOLL)
4093 struct io_epoll *ie = &req->epoll;
4094 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004095 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004096
4097 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4098 if (force_nonblock && ret == -EAGAIN)
4099 return -EAGAIN;
4100
4101 if (ret < 0)
4102 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004103 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004104 return 0;
4105#else
4106 return -EOPNOTSUPP;
4107#endif
4108}
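
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_EPOLL_CTL via liburing, assuming io_uring_prep_epoll_ctl()
 * is available; this adds 'fd' to 'epfd' for read readiness.
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
 *	io_uring_submit(&ring);
 */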
4109
Jens Axboec1ca7572019-12-25 22:18:28 -07004110static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4111{
4112#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4113 if (sqe->ioprio || sqe->buf_index || sqe->off)
4114 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004115 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4116 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004117
4118 req->madvise.addr = READ_ONCE(sqe->addr);
4119 req->madvise.len = READ_ONCE(sqe->len);
4120 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4121 return 0;
4122#else
4123 return -EOPNOTSUPP;
4124#endif
4125}
4126
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004127static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004128{
4129#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4130 struct io_madvise *ma = &req->madvise;
4131 int ret;
4132
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004133 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004134 return -EAGAIN;
4135
Minchan Kim0726b012020-10-17 16:14:50 -07004136 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004137 if (ret < 0)
4138 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004139 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004140 return 0;
4141#else
4142 return -EOPNOTSUPP;
4143#endif
4144}
4145
Jens Axboe4840e412019-12-25 22:03:45 -07004146static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4147{
4148 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4149 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004150 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4151 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004152
4153 req->fadvise.offset = READ_ONCE(sqe->off);
4154 req->fadvise.len = READ_ONCE(sqe->len);
4155 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4156 return 0;
4157}
4158
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004159static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004160{
4161 struct io_fadvise *fa = &req->fadvise;
4162 int ret;
4163
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004164 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004165 switch (fa->advice) {
4166 case POSIX_FADV_NORMAL:
4167 case POSIX_FADV_RANDOM:
4168 case POSIX_FADV_SEQUENTIAL:
4169 break;
4170 default:
4171 return -EAGAIN;
4172 }
4173 }
Jens Axboe4840e412019-12-25 22:03:45 -07004174
4175 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4176 if (ret < 0)
4177 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004178 io_req_complete(req, ret);
Jens Axboe4840e412019-12-25 22:03:45 -07004179 return 0;
4180}
4181
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004182static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4183{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004184 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004185 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004186 if (sqe->ioprio || sqe->buf_index)
4187 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004188 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004189 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004190
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004191 req->statx.dfd = READ_ONCE(sqe->fd);
4192 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004193 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004194 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4195 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004196
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004197 return 0;
4198}
4199
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004200static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004201{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004202 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004203 int ret;
4204
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004205 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004206 /* only need file table for an actual valid fd */
4207 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4208 req->flags |= REQ_F_NO_FILE_TABLE;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004209 return -EAGAIN;
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004210 }
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004211
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004212 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4213 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004214
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004215 if (ret < 0)
4216 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004217 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004218 return 0;
4219}
4220
Jens Axboeb5dba592019-12-11 14:02:38 -07004221static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4222{
Jens Axboe14587a462020-09-05 11:36:08 -06004223 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004224 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004225 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4226 sqe->rw_flags || sqe->buf_index)
4227 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004228 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004229 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004230
4231 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004232 return 0;
4233}
4234
Pavel Begunkov889fca72021-02-10 00:03:09 +00004235static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004236{
Jens Axboe9eac1902021-01-19 15:50:37 -07004237 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004238 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004239 struct fdtable *fdt;
4240 struct file *file;
Jens Axboeb5dba592019-12-11 14:02:38 -07004241 int ret;
4242
Jens Axboe9eac1902021-01-19 15:50:37 -07004243 file = NULL;
4244 ret = -EBADF;
4245 spin_lock(&files->file_lock);
4246 fdt = files_fdtable(files);
4247 if (close->fd >= fdt->max_fds) {
4248 spin_unlock(&files->file_lock);
4249 goto err;
4250 }
4251 file = fdt->fd[close->fd];
4252 if (!file) {
4253 spin_unlock(&files->file_lock);
4254 goto err;
4255 }
4256
4257 if (file->f_op == &io_uring_fops) {
4258 spin_unlock(&files->file_lock);
4259 file = NULL;
4260 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004261 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004262
4263 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004264 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004265 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004266 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004267 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004268
Jens Axboe9eac1902021-01-19 15:50:37 -07004269 ret = __close_fd_get_file(close->fd, &file);
4270 spin_unlock(&files->file_lock);
4271 if (ret < 0) {
4272 if (ret == -ENOENT)
4273 ret = -EBADF;
4274 goto err;
4275 }
4276
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004277 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004278 ret = filp_close(file, current->files);
4279err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004280 if (ret < 0)
4281 req_set_fail_links(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004282 if (file)
4283 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004284 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004285 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004286}
4287
Pavel Begunkov1155c762021-02-18 18:29:38 +00004288static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004289{
4290 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004291
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004292 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4293 return -EINVAL;
4294 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4295 return -EINVAL;
4296
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004297 req->sync.off = READ_ONCE(sqe->off);
4298 req->sync.len = READ_ONCE(sqe->len);
4299 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004300 return 0;
4301}
4302
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004303static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004304{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004305 int ret;
4306
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004307 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004308 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004309 return -EAGAIN;
4310
Jens Axboe9adbd452019-12-20 08:45:55 -07004311 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004312 req->sync.flags);
4313 if (ret < 0)
4314 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004315 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004316 return 0;
4317}
4318
YueHaibing469956e2020-03-04 15:53:52 +08004319#if defined(CONFIG_NET)
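/*
 * Stash the on-stack msghdr state into the request's async context so a
 * send/recv that couldn't complete nonblocking can be retried from a
 * blocking context. Returns -EAGAIN to trigger the async punt, or -ENOMEM
 * if no async context could be allocated.
 */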
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004320static int io_setup_async_msg(struct io_kiocb *req,
4321 struct io_async_msghdr *kmsg)
4322{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004323 struct io_async_msghdr *async_msg = req->async_data;
4324
4325 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004326 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004327 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004328 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004329 return -ENOMEM;
4330 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004331 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004332 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004333 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004334 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004335	/* if we were using fast_iov, set it to the new one */
4336 if (!async_msg->free_iov)
4337 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4338
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004339 return -EAGAIN;
4340}
4341
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004342static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4343 struct io_async_msghdr *iomsg)
4344{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004345 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004346 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004347 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004348 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004349}
4350
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004351static int io_sendmsg_prep_async(struct io_kiocb *req)
4352{
4353 int ret;
4354
4355 if (!io_op_defs[req->opcode].needs_async_data)
4356 return 0;
4357 ret = io_sendmsg_copy_hdr(req, req->async_data);
4358 if (!ret)
4359 req->flags |= REQ_F_NEED_CLEANUP;
4360 return ret;
4361}
4362
Jens Axboe3529d8c2019-12-19 18:24:38 -07004363static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004364{
Jens Axboee47293f2019-12-20 08:58:21 -07004365 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004366
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004367 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4368 return -EINVAL;
4369
Jens Axboee47293f2019-12-20 08:58:21 -07004370 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004371 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004372 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004373
Jens Axboed8768362020-02-27 14:17:49 -07004374#ifdef CONFIG_COMPAT
4375 if (req->ctx->compat)
4376 sr->msg_flags |= MSG_CMSG_COMPAT;
4377#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004378 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004379}
4380
Pavel Begunkov889fca72021-02-10 00:03:09 +00004381static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004382{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004383 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004384 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004385 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004386 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004387 int ret;
4388
Florent Revestdba4a922020-12-04 12:36:04 +01004389 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004390 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004391 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004392
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004393 kmsg = req->async_data;
4394 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004395 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004396 if (ret)
4397 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004398 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004399 }
4400
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004401 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004402 if (flags & MSG_DONTWAIT)
4403 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004404 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004405 flags |= MSG_DONTWAIT;
4406
Stefan Metzmacher00312752021-03-20 20:33:36 +01004407 if (flags & MSG_WAITALL)
4408 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4409
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004410 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004411 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004412 return io_setup_async_msg(req, kmsg);
4413 if (ret == -ERESTARTSYS)
4414 ret = -EINTR;
4415
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004416 /* fast path, check for non-NULL to avoid function call */
4417 if (kmsg->free_iov)
4418 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004419 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004420 if (ret < min_ret)
Jens Axboefddafac2020-01-04 20:19:44 -07004421 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004422 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004423 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004424}
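
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_SENDMSG via liburing; 'msg' is a regular struct msghdr whose
 * buffers must stay valid until the request completes.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 */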
4425
Pavel Begunkov889fca72021-02-10 00:03:09 +00004426static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004427{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004428 struct io_sr_msg *sr = &req->sr_msg;
4429 struct msghdr msg;
4430 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004431 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004432 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004433 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004434 int ret;
4435
Florent Revestdba4a922020-12-04 12:36:04 +01004436 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004437 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004438 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004439
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004440 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4441 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004442 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004443
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004444 msg.msg_name = NULL;
4445 msg.msg_control = NULL;
4446 msg.msg_controllen = 0;
4447 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004448
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004449 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004450 if (flags & MSG_DONTWAIT)
4451 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004452 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004453 flags |= MSG_DONTWAIT;
Jens Axboe03b12302019-12-02 18:50:25 -07004454
Stefan Metzmacher00312752021-03-20 20:33:36 +01004455 if (flags & MSG_WAITALL)
4456 min_ret = iov_iter_count(&msg.msg_iter);
4457
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004458 msg.msg_flags = flags;
4459 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004460 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004461 return -EAGAIN;
4462 if (ret == -ERESTARTSYS)
4463 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004464
Stefan Metzmacher00312752021-03-20 20:33:36 +01004465 if (ret < min_ret)
Jens Axboe03b12302019-12-02 18:50:25 -07004466 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004467 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004468 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004469}
4470
Pavel Begunkov1400e692020-07-12 20:41:05 +03004471static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4472 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004473{
4474 struct io_sr_msg *sr = &req->sr_msg;
4475 struct iovec __user *uiov;
4476 size_t iov_len;
4477 int ret;
4478
Pavel Begunkov1400e692020-07-12 20:41:05 +03004479 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4480 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004481 if (ret)
4482 return ret;
4483
4484 if (req->flags & REQ_F_BUFFER_SELECT) {
4485 if (iov_len > 1)
4486 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004487 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004488 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004489 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004490 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004491 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004492 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004493 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004494 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004495 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004496 if (ret > 0)
4497 ret = 0;
4498 }
4499
4500 return ret;
4501}
4502
4503#ifdef CONFIG_COMPAT
4504static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004505 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004506{
4507 struct compat_msghdr __user *msg_compat;
4508 struct io_sr_msg *sr = &req->sr_msg;
4509 struct compat_iovec __user *uiov;
4510 compat_uptr_t ptr;
4511 compat_size_t len;
4512 int ret;
4513
Pavel Begunkov270a5942020-07-12 20:41:04 +03004514 msg_compat = (struct compat_msghdr __user *) sr->umsg;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004515 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004516 &ptr, &len);
4517 if (ret)
4518 return ret;
4519
4520 uiov = compat_ptr(ptr);
4521 if (req->flags & REQ_F_BUFFER_SELECT) {
4522 compat_ssize_t clen;
4523
4524 if (len > 1)
4525 return -EINVAL;
4526 if (!access_ok(uiov, sizeof(*uiov)))
4527 return -EFAULT;
4528 if (__get_user(clen, &uiov->iov_len))
4529 return -EFAULT;
4530 if (clen < 0)
4531 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004532 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004533 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004534 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004535 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004536 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004537 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004538 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004539 if (ret < 0)
4540 return ret;
4541 }
4542
4543 return 0;
4544}
Jens Axboe03b12302019-12-02 18:50:25 -07004545#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004546
Pavel Begunkov1400e692020-07-12 20:41:05 +03004547static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4548 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004549{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004550 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004551
4552#ifdef CONFIG_COMPAT
4553 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004554 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004555#endif
4556
Pavel Begunkov1400e692020-07-12 20:41:05 +03004557 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004558}
4559
Jens Axboebcda7ba2020-02-23 16:42:51 -07004560static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004561 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004562{
4563 struct io_sr_msg *sr = &req->sr_msg;
4564 struct io_buffer *kbuf;
4565
Jens Axboebcda7ba2020-02-23 16:42:51 -07004566 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4567 if (IS_ERR(kbuf))
4568 return kbuf;
4569
4570 sr->kbuf = kbuf;
4571 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004572 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004573}
4574
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004575static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4576{
4577 return io_put_kbuf(req, req->sr_msg.kbuf);
4578}
4579
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004580static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004581{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004582 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004583
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004584 if (!io_op_defs[req->opcode].needs_async_data)
4585 return 0;
4586 ret = io_recvmsg_copy_hdr(req, req->async_data);
4587 if (!ret)
4588 req->flags |= REQ_F_NEED_CLEANUP;
4589 return ret;
4590}
4591
4592static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4593{
4594 struct io_sr_msg *sr = &req->sr_msg;
4595
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004596 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4597 return -EINVAL;
4598
Jens Axboe3529d8c2019-12-19 18:24:38 -07004599 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004600 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004601 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004602 sr->bgid = READ_ONCE(sqe->buf_group);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004603
Jens Axboed8768362020-02-27 14:17:49 -07004604#ifdef CONFIG_COMPAT
4605 if (req->ctx->compat)
4606 sr->msg_flags |= MSG_CMSG_COMPAT;
4607#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004608 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004609}
4610
Pavel Begunkov889fca72021-02-10 00:03:09 +00004611static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004612{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004613 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004614 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004615 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004616 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004617 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004618 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004619 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004620
Florent Revestdba4a922020-12-04 12:36:04 +01004621 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004622 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004623 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004624
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004625 kmsg = req->async_data;
4626 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004627 ret = io_recvmsg_copy_hdr(req, &iomsg);
4628 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004629 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004630 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004631 }
4632
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004633 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004634 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004635 if (IS_ERR(kbuf))
4636 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004637 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004638 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4639 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004640 1, req->sr_msg.len);
4641 }
4642
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004643 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004644 if (flags & MSG_DONTWAIT)
4645 req->flags |= REQ_F_NOWAIT;
4646 else if (force_nonblock)
4647 flags |= MSG_DONTWAIT;
4648
Stefan Metzmacher00312752021-03-20 20:33:36 +01004649 if (flags & MSG_WAITALL)
4650 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4651
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004652 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4653 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004654 if (force_nonblock && ret == -EAGAIN)
4655 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004656 if (ret == -ERESTARTSYS)
4657 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004658
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004659 if (req->flags & REQ_F_BUFFER_SELECTED)
4660 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004661 /* fast path, check for non-NULL to avoid function call */
4662 if (kmsg->free_iov)
4663 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004664 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004665 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004666 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004667 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004668 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004669}
4670
Pavel Begunkov889fca72021-02-10 00:03:09 +00004671static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004672{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004673 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004674 struct io_sr_msg *sr = &req->sr_msg;
4675 struct msghdr msg;
4676 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004677 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004678 struct iovec iov;
4679 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004680 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004681 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004682 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004683
Florent Revestdba4a922020-12-04 12:36:04 +01004684 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004685 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004686 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004687
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004688 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004689 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004690 if (IS_ERR(kbuf))
4691 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004692 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004693 }
4694
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004695 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004696 if (unlikely(ret))
4697 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004698
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004699 msg.msg_name = NULL;
4700 msg.msg_control = NULL;
4701 msg.msg_controllen = 0;
4702 msg.msg_namelen = 0;
4703 msg.msg_iocb = NULL;
4704 msg.msg_flags = 0;
4705
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004706 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004707 if (flags & MSG_DONTWAIT)
4708 req->flags |= REQ_F_NOWAIT;
4709 else if (force_nonblock)
4710 flags |= MSG_DONTWAIT;
4711
Stefan Metzmacher00312752021-03-20 20:33:36 +01004712 if (flags & MSG_WAITALL)
4713 min_ret = iov_iter_count(&msg.msg_iter);
4714
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004715 ret = sock_recvmsg(sock, &msg, flags);
4716 if (force_nonblock && ret == -EAGAIN)
4717 return -EAGAIN;
4718 if (ret == -ERESTARTSYS)
4719 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004720out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004721 if (req->flags & REQ_F_BUFFER_SELECTED)
4722 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01004723 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Jens Axboefddafac2020-01-04 20:19:44 -07004724 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004725 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004726 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004727}
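
/*
 * Example (not part of the kernel source): userspace sketch of a recv
 * using a provided-buffer group via liburing; the kernel picks a free
 * buffer from group 1 and reports its ID in the completion's cqe->flags.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 */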
4728
Jens Axboe3529d8c2019-12-19 18:24:38 -07004729static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004730{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004731 struct io_accept *accept = &req->accept;
4732
Jens Axboe14587a462020-09-05 11:36:08 -06004733 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004734 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004735 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004736 return -EINVAL;
4737
Jens Axboed55e5f52019-12-11 16:12:15 -07004738 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4739 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004740 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004741 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004742 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004743}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004744
Pavel Begunkov889fca72021-02-10 00:03:09 +00004745static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004746{
4747 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004748 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004749 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004750 int ret;
4751
Jiufei Xuee697dee2020-06-10 13:41:59 +08004752 if (req->file->f_flags & O_NONBLOCK)
4753 req->flags |= REQ_F_NOWAIT;
4754
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004755 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004756 accept->addr_len, accept->flags,
4757 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004758 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004759 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004760 if (ret < 0) {
4761 if (ret == -ERESTARTSYS)
4762 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004763 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004764 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004765 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004766 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004767}
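
/*
 * Example (not part of the kernel source): userspace sketch for
 * IORING_OP_ACCEPT via liburing; addr/addrlen may be NULL if the peer
 * address isn't needed, and the flags map to accept4().
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_accept(sqe, listenfd, NULL, NULL, SOCK_CLOEXEC);
 *	io_uring_submit(&ring);
 */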
4768
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004769static int io_connect_prep_async(struct io_kiocb *req)
4770{
4771 struct io_async_connect *io = req->async_data;
4772 struct io_connect *conn = &req->connect;
4773
4774 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4775}
4776
Jens Axboe3529d8c2019-12-19 18:24:38 -07004777static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004778{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004779 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004780
Jens Axboe14587a462020-09-05 11:36:08 -06004781 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004782 return -EINVAL;
4783 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4784 return -EINVAL;
4785
Jens Axboe3529d8c2019-12-19 18:24:38 -07004786 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4787 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004788 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004789}
4790
Pavel Begunkov889fca72021-02-10 00:03:09 +00004791static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004792{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004793 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004794 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004795 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004796 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004797
Jens Axboee8c2bc12020-08-15 18:44:09 -07004798 if (req->async_data) {
4799 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004800 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004801 ret = move_addr_to_kernel(req->connect.addr,
4802 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004803 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004804 if (ret)
4805 goto out;
4806 io = &__io;
4807 }
4808
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004809 file_flags = force_nonblock ? O_NONBLOCK : 0;
4810
Jens Axboee8c2bc12020-08-15 18:44:09 -07004811 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004812 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004813 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004814 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004815 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004816 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004817 ret = -ENOMEM;
4818 goto out;
4819 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004820 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004821 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004822 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004823 if (ret == -ERESTARTSYS)
4824 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004825out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004826 if (ret < 0)
4827 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004828 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004829 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004830}
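/*
 * Userspace sketch (not part of this file): the -EAGAIN/-EINPROGRESS retry
 * dance in io_connect() is invisible to the application, which sees a
 * single CQE once the connect resolves. io_uring_prep_connect() is
 * liburing's wrapper and is assumed here. The sockaddr should remain valid
 * until completion, since the kernel may not copy it until the request is
 * actually issued.
 */
#include <liburing.h>
#include <sys/socket.h>

static int connect_async(struct io_uring *ring, int sockfd,
			 const struct sockaddr *addr, socklen_t addrlen)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_connect(sqe, sockfd, addr, addrlen);
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe) < 0)
		return -1;
	ret = cqe->res;		/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}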
YueHaibing469956e2020-03-04 15:53:52 +08004831#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004832#define IO_NETOP_FN(op) \
4833static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4834{ \
4835 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004836}
4837
Jens Axboe99a10082021-02-19 09:35:19 -07004838#define IO_NETOP_PREP(op) \
4839IO_NETOP_FN(op) \
4840static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4841{ \
4842 return -EOPNOTSUPP; \
4843} \
4844
4845#define IO_NETOP_PREP_ASYNC(op) \
4846IO_NETOP_PREP(op) \
4847static int io_##op##_prep_async(struct io_kiocb *req) \
4848{ \
4849 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004850}
4851
Jens Axboe99a10082021-02-19 09:35:19 -07004852IO_NETOP_PREP_ASYNC(sendmsg);
4853IO_NETOP_PREP_ASYNC(recvmsg);
4854IO_NETOP_PREP_ASYNC(connect);
4855IO_NETOP_PREP(accept);
4856IO_NETOP_FN(send);
4857IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004858#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004859
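/*
 * For reference, the stub macros above expand mechanically; e.g.
 * IO_NETOP_PREP_ASYNC(connect) produces the three !CONFIG_NET fallbacks
 * below (reconstructed here by hand from the macro bodies):
 */
#if 0	/* expansion shown for illustration only */
static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_connect_prep_async(struct io_kiocb *req)
{
	return -EOPNOTSUPP;
}
#endif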
Jens Axboed7718a92020-02-14 22:23:12 -07004860struct io_poll_table {
4861 struct poll_table_struct pt;
4862 struct io_kiocb *req;
4863 int error;
4864};
4865
Jens Axboed7718a92020-02-14 22:23:12 -07004866static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4867 __poll_t mask, task_work_func_t func)
4868{
Jens Axboeaa96bf82020-04-03 11:26:26 -06004869 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004870
4871 /* for instances that support it, check for an event match first: */
4872 if (mask && !(mask & poll->events))
4873 return 0;
4874
4875 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4876
4877 list_del_init(&poll->wait.entry);
4878
Jens Axboed7718a92020-02-14 22:23:12 -07004879 req->result = mask;
Jens Axboe7cbf1722021-02-10 00:03:20 +00004880 req->task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004881 percpu_ref_get(&req->ctx->refs);
4882
Jens Axboed7718a92020-02-14 22:23:12 -07004883 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004884 * If this fails, then the task is exiting. When a task exits, the
4885 * work gets canceled, so just cancel this request as well instead
4886 * of executing it. We can't safely execute it anyway, as we may not
4887 * have the state it needs.
Jens Axboed7718a92020-02-14 22:23:12 -07004888 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06004889 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004890 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004891 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00004892 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004893 }
Jens Axboed7718a92020-02-14 22:23:12 -07004894 return 1;
4895}
4896
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004897static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4898 __acquires(&req->ctx->completion_lock)
4899{
4900 struct io_ring_ctx *ctx = req->ctx;
4901
4902 if (!req->result && !READ_ONCE(poll->canceled)) {
4903 struct poll_table_struct pt = { ._key = poll->events };
4904
4905 req->result = vfs_poll(req->file, &pt) & poll->events;
4906 }
4907
4908 spin_lock_irq(&ctx->completion_lock);
4909 if (!req->result && !READ_ONCE(poll->canceled)) {
4910 add_wait_queue(poll->head, &poll->wait);
4911 return true;
4912 }
4913
4914 return false;
4915}
4916
Jens Axboed4e7cd32020-08-15 11:44:50 -07004917static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004918{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004919 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004920 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004921 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004922 return req->apoll->double_poll;
4923}
4924
4925static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4926{
4927 if (req->opcode == IORING_OP_POLL_ADD)
4928 return &req->poll;
4929 return &req->apoll->poll;
4930}
4931
4932static void io_poll_remove_double(struct io_kiocb *req)
4933{
4934 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004935
4936 lockdep_assert_held(&req->ctx->completion_lock);
4937
4938 if (poll && poll->head) {
4939 struct wait_queue_head *head = poll->head;
4940
4941 spin_lock(&head->lock);
4942 list_del_init(&poll->wait.entry);
4943 if (poll->wait.private)
4944 refcount_dec(&req->refs);
4945 poll->head = NULL;
4946 spin_unlock(&head->lock);
4947 }
4948}
4949
4950static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4951{
4952 struct io_ring_ctx *ctx = req->ctx;
4953
Jens Axboed4e7cd32020-08-15 11:44:50 -07004954 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004955 req->poll.done = true;
4956 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4957 io_commit_cqring(ctx);
4958}
4959
Jens Axboe18bceab2020-05-15 11:56:54 -06004960static void io_poll_task_func(struct callback_head *cb)
4961{
4962 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004963 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004964 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004965
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004966 if (io_poll_rewait(req, &req->poll)) {
4967 spin_unlock_irq(&ctx->completion_lock);
4968 } else {
4969 hash_del(&req->hash_node);
4970 io_poll_complete(req, req->result, 0);
4971 spin_unlock_irq(&ctx->completion_lock);
4972
4973 nxt = io_put_req_find_next(req);
4974 io_cqring_ev_posted(ctx);
4975 if (nxt)
4976 __io_req_task_submit(nxt);
4977 }
4978
Jens Axboe6d816e02020-08-11 08:04:14 -06004979 percpu_ref_put(&ctx->refs);
Jens Axboe18bceab2020-05-15 11:56:54 -06004980}
4981
4982static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4983 int sync, void *key)
4984{
4985 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004986 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004987 __poll_t mask = key_to_poll(key);
4988
4989 /* for instances that support it, check for an event match first: */
4990 if (mask && !(mask & poll->events))
4991 return 0;
4992
Jens Axboe8706e042020-09-28 08:38:54 -06004993 list_del_init(&wait->entry);
4994
Jens Axboe807abcb2020-07-17 17:09:27 -06004995 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004996 bool done;
4997
Jens Axboe807abcb2020-07-17 17:09:27 -06004998 spin_lock(&poll->head->lock);
4999 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06005000 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06005001 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005002 /* make sure double remove sees this as being gone */
5003 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06005004 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06005005 if (!done) {
5006 /* use wait func handler, so it matches the rq type */
5007 poll->wait.func(&poll->wait, mode, sync, key);
5008 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005009 }
5010 refcount_dec(&req->refs);
5011 return 1;
5012}
5013
5014static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5015 wait_queue_func_t wake_func)
5016{
5017 poll->head = NULL;
5018 poll->done = false;
5019 poll->canceled = false;
5020 poll->events = events;
5021 INIT_LIST_HEAD(&poll->wait.entry);
5022 init_waitqueue_func_entry(&poll->wait, wake_func);
5023}
5024
5025static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005026 struct wait_queue_head *head,
5027 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005028{
5029 struct io_kiocb *req = pt->req;
5030
5031 /*
5032 * If poll->head is already set, it's because the file being polled
5033 * uses multiple waitqueues for poll handling (eg one for read, one
5034 * for write). Setup a separate io_poll_iocb if this happens.
5035 */
5036 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005037 struct io_poll_iocb *poll_one = poll;
5038
Jens Axboe18bceab2020-05-15 11:56:54 -06005039 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005040 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005041 pt->error = -EINVAL;
5042 return;
5043 }
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005044 /* double add on the same waitqueue head, ignore */
5045 if (poll->head == head)
5046 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005047 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5048 if (!poll) {
5049 pt->error = -ENOMEM;
5050 return;
5051 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005052 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboe18bceab2020-05-15 11:56:54 -06005053 refcount_inc(&req->refs);
5054 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005055 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005056 }
5057
5058 pt->error = 0;
5059 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005060
5061 if (poll->events & EPOLLEXCLUSIVE)
5062 add_wait_queue_exclusive(head, &poll->wait);
5063 else
5064 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005065}
5066
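/*
 * A minimal sketch of the driver-side pattern that makes __io_queue_proc()
 * allocate the second io_poll_iocb: a ->poll handler that registers on two
 * waitqueues. The device below is hypothetical and exists only to show the
 * two poll_wait() calls; sockets with separate read and write queues are
 * the real-world case.
 */
struct demo_dev {
	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	bool readable, writable;
};

static __poll_t demo_poll(struct file *file, poll_table *pt)
{
	struct demo_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->read_wq, pt);	/* 1st head: poll->head is set */
	poll_wait(file, &dev->write_wq, pt);	/* 2nd head: *poll_ptr is allocated */

	if (dev->readable)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (dev->writable)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}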
5067static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5068 struct poll_table_struct *p)
5069{
5070 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005071 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005072
Jens Axboe807abcb2020-07-17 17:09:27 -06005073 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005074}
5075
Jens Axboed7718a92020-02-14 22:23:12 -07005076static void io_async_task_func(struct callback_head *cb)
5077{
5078 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5079 struct async_poll *apoll = req->apoll;
5080 struct io_ring_ctx *ctx = req->ctx;
5081
5082 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5083
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005084 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005085 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe6d816e02020-08-11 08:04:14 -06005086 percpu_ref_put(&ctx->refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005087 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005088 }
5089
Jens Axboe31067252020-05-17 17:43:31 -06005090 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005091 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005092 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005093
Jens Axboed4e7cd32020-08-15 11:44:50 -07005094 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005095 spin_unlock_irq(&ctx->completion_lock);
5096
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005097 if (!READ_ONCE(apoll->poll.canceled))
5098 __io_req_task_submit(req);
5099 else
5100 __io_req_task_cancel(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005101
Jens Axboe6d816e02020-08-11 08:04:14 -06005102 percpu_ref_put(&ctx->refs);
Jens Axboe807abcb2020-07-17 17:09:27 -06005103 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005104 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005105}
5106
5107static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5108 void *key)
5109{
5110 struct io_kiocb *req = wait->private;
5111 struct io_poll_iocb *poll = &req->apoll->poll;
5112
5113 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5114 key_to_poll(key));
5115
5116 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5117}
5118
5119static void io_poll_req_insert(struct io_kiocb *req)
5120{
5121 struct io_ring_ctx *ctx = req->ctx;
5122 struct hlist_head *list;
5123
5124 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5125 hlist_add_head(&req->hash_node, list);
5126}
5127
5128static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5129 struct io_poll_iocb *poll,
5130 struct io_poll_table *ipt, __poll_t mask,
5131 wait_queue_func_t wake_func)
5132 __acquires(&ctx->completion_lock)
5133{
5134 struct io_ring_ctx *ctx = req->ctx;
5135 bool cancel = false;
5136
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005137 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005138 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005139 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005140 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005141
5142 ipt->pt._key = mask;
5143 ipt->req = req;
5144 ipt->error = -EINVAL;
5145
Jens Axboed7718a92020-02-14 22:23:12 -07005146 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5147
5148 spin_lock_irq(&ctx->completion_lock);
5149 if (likely(poll->head)) {
5150 spin_lock(&poll->head->lock);
5151 if (unlikely(list_empty(&poll->wait.entry))) {
5152 if (ipt->error)
5153 cancel = true;
5154 ipt->error = 0;
5155 mask = 0;
5156 }
5157 if (mask || ipt->error)
5158 list_del_init(&poll->wait.entry);
5159 else if (cancel)
5160 WRITE_ONCE(poll->canceled, true);
5161 else if (!poll->done) /* actually waiting for an event */
5162 io_poll_req_insert(req);
5163 spin_unlock(&poll->head->lock);
5164 }
5165
5166 return mask;
5167}
5168
5169static bool io_arm_poll_handler(struct io_kiocb *req)
5170{
5171 const struct io_op_def *def = &io_op_defs[req->opcode];
5172 struct io_ring_ctx *ctx = req->ctx;
5173 struct async_poll *apoll;
5174 struct io_poll_table ipt;
5175 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005176 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005177
5178 if (!req->file || !file_can_poll(req->file))
5179 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005180 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005181 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005182 if (def->pollin)
5183 rw = READ;
5184 else if (def->pollout)
5185 rw = WRITE;
5186 else
5187 return false;
5188 /* if we can't do a nonblocking try, there's no point in arming a poll handler */
5189 if (!io_file_supports_async(req->file, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005190 return false;
5191
5192 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5193 if (unlikely(!apoll))
5194 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005195 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005196
5197 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005198 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005199
Nathan Chancellor8755d972020-03-02 16:01:19 -07005200 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005201 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005202 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005203 if (def->pollout)
5204 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005205
5206 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5207 if ((req->opcode == IORING_OP_RECVMSG) &&
5208 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5209 mask &= ~POLLIN;
5210
Jens Axboed7718a92020-02-14 22:23:12 -07005211 mask |= POLLERR | POLLPRI;
5212
5213 ipt.pt._qproc = io_async_queue_proc;
5214
5215 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5216 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005217 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005218 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005219 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005220 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005221 kfree(apoll);
5222 return false;
5223 }
5224 spin_unlock_irq(&ctx->completion_lock);
5225 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5226 apoll->poll.events);
5227 return true;
5228}
5229
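/*
 * Worked example of the mask built above: for a pollin opcode such as
 * IORING_OP_RECV, the armed mask is POLLIN|POLLRDNORM|POLLERR|POLLPRI,
 * except that recvmsg() with MSG_ERRQUEUE drops POLLIN so error-queue
 * reads don't wait on normal data. POLLERR|POLLPRI are always included
 * so the request is re-driven (and fails cleanly) on socket errors.
 */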
5230static bool __io_poll_remove_one(struct io_kiocb *req,
5231 struct io_poll_iocb *poll)
5232{
Jens Axboeb41e9852020-02-17 09:52:41 -07005233 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005234
5235 spin_lock(&poll->head->lock);
5236 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005237 if (!list_empty(&poll->wait.entry)) {
5238 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005239 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005240 }
5241 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005242 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005243 return do_complete;
5244}
5245
5246static bool io_poll_remove_one(struct io_kiocb *req)
5247{
5248 bool do_complete;
5249
Jens Axboed4e7cd32020-08-15 11:44:50 -07005250 io_poll_remove_double(req);
5251
Jens Axboed7718a92020-02-14 22:23:12 -07005252 if (req->opcode == IORING_OP_POLL_ADD) {
5253 do_complete = __io_poll_remove_one(req, &req->poll);
5254 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005255 struct async_poll *apoll = req->apoll;
5256
Jens Axboed7718a92020-02-14 22:23:12 -07005257 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005258 do_complete = __io_poll_remove_one(req, &apoll->poll);
5259 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005260 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005261 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005262 kfree(apoll);
5263 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005264 }
5265
Jens Axboeb41e9852020-02-17 09:52:41 -07005266 if (do_complete) {
5267 io_cqring_fill_event(req, -ECANCELED);
5268 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005269 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005270 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005271 }
5272
5273 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005274}
5275
Jens Axboe76e1b642020-09-26 15:05:03 -06005276/*
5277 * Returns true if we found and killed one or more poll requests
5278 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005279static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5280 struct files_struct *files)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005281{
Jens Axboe78076bb2019-12-04 19:56:40 -07005282 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005283 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005284 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005285
5286 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005287 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5288 struct hlist_head *list;
5289
5290 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005291 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00005292 if (io_match_task(req, tsk, files))
Jens Axboef3606e32020-09-22 08:18:24 -06005293 posted += io_poll_remove_one(req);
5294 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005295 }
5296 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005297
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005298 if (posted)
5299 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005300
5301 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005302}
5303
Jens Axboe47f46762019-11-09 17:43:02 -07005304static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5305{
Jens Axboe78076bb2019-12-04 19:56:40 -07005306 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005307 struct io_kiocb *req;
5308
Jens Axboe78076bb2019-12-04 19:56:40 -07005309 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5310 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005311 if (sqe_addr != req->user_data)
5312 continue;
5313 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005314 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005315 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005316 }
5317
5318 return -ENOENT;
5319}
5320
Jens Axboe3529d8c2019-12-19 18:24:38 -07005321static int io_poll_remove_prep(struct io_kiocb *req,
5322 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005323{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005324 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5325 return -EINVAL;
5326 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5327 sqe->poll_events)
5328 return -EINVAL;
5329
Pavel Begunkov018043b2020-10-27 23:17:18 +00005330 req->poll_remove.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005331 return 0;
5332}
5333
5334/*
5335 * Find a running poll command that matches one specified in sqe->addr,
5336 * and remove it if found.
5337 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005338static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005339{
5340 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0969e782019-12-17 18:40:57 -07005341 int ret;
5342
Jens Axboe221c5eb2019-01-17 09:41:58 -07005343 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov018043b2020-10-27 23:17:18 +00005344 ret = io_poll_cancel(ctx, req->poll_remove.addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005345 spin_unlock_irq(&ctx->completion_lock);
5346
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005347 if (ret < 0)
5348 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005349 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005350 return 0;
5351}
5352
Jens Axboe221c5eb2019-01-17 09:41:58 -07005353static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5354 void *key)
5355{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005356 struct io_kiocb *req = wait->private;
5357 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005358
Jens Axboed7718a92020-02-14 22:23:12 -07005359 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005360}
5361
Jens Axboe221c5eb2019-01-17 09:41:58 -07005362static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5363 struct poll_table_struct *p)
5364{
5365 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5366
Jens Axboee8c2bc12020-08-15 18:44:09 -07005367 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005368}
5369
Jens Axboe3529d8c2019-12-19 18:24:38 -07005370static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005371{
5372 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005373 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005374
5375 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5376 return -EINVAL;
5377 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5378 return -EINVAL;
5379
Jiufei Xue5769a352020-06-17 17:53:55 +08005380 events = READ_ONCE(sqe->poll32_events);
5381#ifdef __BIG_ENDIAN
5382 events = swahw32(events);
5383#endif
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005384 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5385 (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005386 return 0;
5387}
5388
Pavel Begunkov61e98202021-02-10 00:03:08 +00005389static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005390{
5391 struct io_poll_iocb *poll = &req->poll;
5392 struct io_ring_ctx *ctx = req->ctx;
5393 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005394 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005395
Jens Axboed7718a92020-02-14 22:23:12 -07005396 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005397
Jens Axboed7718a92020-02-14 22:23:12 -07005398 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5399 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005400
Jens Axboe8c838782019-03-12 15:48:16 -06005401 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005402 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005403 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005404 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005405 spin_unlock_irq(&ctx->completion_lock);
5406
Jens Axboe8c838782019-03-12 15:48:16 -06005407 if (mask) {
5408 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005409 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005410 }
Jens Axboe8c838782019-03-12 15:48:16 -06005411 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005412}
5413
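/*
 * Userspace sketch (not part of this file): a one-shot poll armed with
 * IORING_OP_POLL_ADD and torn down with IORING_OP_POLL_REMOVE, matched by
 * user_data exactly as io_poll_cancel() does above. liburing prototypes
 * (notably the user_data argument of io_uring_prep_poll_remove()) have
 * changed across versions, so the calls below are an assumption.
 */
#include <liburing.h>
#include <poll.h>

static void poll_then_cancel(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_poll_add(sqe, fd, POLLIN);
	sqe->user_data = 0x1234;	/* key io_poll_cancel() will look up */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_poll_remove(sqe, (void *)(unsigned long)0x1234);
	sqe->user_data = 0x5678;

	io_uring_submit(ring);
	/* expect two CQEs: -ECANCELED for 0x1234; 0, -ENOENT or -EALREADY for 0x5678 */
}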
Jens Axboe5262f562019-09-17 12:26:57 -06005414static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5415{
Jens Axboead8a48a2019-11-15 08:49:11 -07005416 struct io_timeout_data *data = container_of(timer,
5417 struct io_timeout_data, timer);
5418 struct io_kiocb *req = data->req;
5419 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005420 unsigned long flags;
5421
Jens Axboe5262f562019-09-17 12:26:57 -06005422 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005423 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005424 atomic_set(&req->ctx->cq_timeouts,
5425 atomic_read(&req->ctx->cq_timeouts) + 1);
5426
Jens Axboe78e19bb2019-11-06 15:21:34 -07005427 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005428 io_commit_cqring(ctx);
5429 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5430
5431 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005432 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005433 io_put_req(req);
5434 return HRTIMER_NORESTART;
5435}
5436
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005437static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5438 __u64 user_data)
Jens Axboe47f46762019-11-09 17:43:02 -07005439{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005440 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005441 struct io_kiocb *req;
5442 int ret = -ENOENT;
5443
5444 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5445 if (user_data == req->user_data) {
5446 ret = 0;
5447 break;
5448 }
5449 }
5450
5451 if (ret == -ENOENT)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005452 return ERR_PTR(ret);
Jens Axboef254ac02020-08-12 17:33:30 -06005453
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005454 io = req->async_data;
5455 ret = hrtimer_try_to_cancel(&io->timer);
5456 if (ret == -1)
5457 return ERR_PTR(-EALREADY);
5458 list_del_init(&req->timeout.list);
5459 return req;
5460}
5461
5462static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5463{
5464 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5465
5466 if (IS_ERR(req))
5467 return PTR_ERR(req);
5468
5469 req_set_fail_links(req);
5470 io_cqring_fill_event(req, -ECANCELED);
5471 io_put_req_deferred(req, 1);
5472 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005473}
5474
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005475static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5476 struct timespec64 *ts, enum hrtimer_mode mode)
5477{
5478 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5479 struct io_timeout_data *data;
5480
5481 if (IS_ERR(req))
5482 return PTR_ERR(req);
5483
5484 req->timeout.off = 0; /* noseq */
5485 data = req->async_data;
5486 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5487 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5488 data->timer.function = io_timeout_fn;
5489 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5490 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005491}
5492
Jens Axboe3529d8c2019-12-19 18:24:38 -07005493static int io_timeout_remove_prep(struct io_kiocb *req,
5494 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005495{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005496 struct io_timeout_rem *tr = &req->timeout_rem;
5497
Jens Axboeb29472e2019-12-17 18:50:29 -07005498 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5499 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005500 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5501 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005502 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005503 return -EINVAL;
5504
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005505 tr->addr = READ_ONCE(sqe->addr);
5506 tr->flags = READ_ONCE(sqe->timeout_flags);
5507 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5508 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5509 return -EINVAL;
5510 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5511 return -EFAULT;
5512 } else if (tr->flags) {
5513 /* timeout removal doesn't support flags */
5514 return -EINVAL;
5515 }
5516
Jens Axboeb29472e2019-12-17 18:50:29 -07005517 return 0;
5518}
5519
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005520static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5521{
5522 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5523 : HRTIMER_MODE_REL;
5524}
5525
Jens Axboe11365042019-10-16 09:08:32 -06005526/*
5527 * Remove or update an existing timeout command
5528 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005529static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005530{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005531 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005532 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005533 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005534
Jens Axboe11365042019-10-16 09:08:32 -06005535 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005536 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005537 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005538 else
5539 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5540 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005541
Jens Axboe47f46762019-11-09 17:43:02 -07005542 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005543 io_commit_cqring(ctx);
5544 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005545 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005546 if (ret < 0)
5547 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005548 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005549 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005550}
5551
Jens Axboe3529d8c2019-12-19 18:24:38 -07005552static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005553 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005554{
Jens Axboead8a48a2019-11-15 08:49:11 -07005555 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005556 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005557 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005558
Jens Axboead8a48a2019-11-15 08:49:11 -07005559 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005560 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005561 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005562 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005563 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005564 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005565 flags = READ_ONCE(sqe->timeout_flags);
5566 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005567 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005568
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005569 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005570
Jens Axboee8c2bc12020-08-15 18:44:09 -07005571 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005572 return -ENOMEM;
5573
Jens Axboee8c2bc12020-08-15 18:44:09 -07005574 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005575 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005576
5577 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005578 return -EFAULT;
5579
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005580 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005581 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkov2482b582021-03-25 18:32:44 +00005582 if (is_timeout_link)
5583 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005584 return 0;
5585}
5586
Pavel Begunkov61e98202021-02-10 00:03:08 +00005587static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005588{
Jens Axboead8a48a2019-11-15 08:49:11 -07005589 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005590 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005591 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005592 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005593
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005594 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005595
Jens Axboe5262f562019-09-17 12:26:57 -06005596 /*
5597 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005598 * timeout event to be satisfied. If it isn't set, then this is
5599 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005600 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005601 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005602 entry = ctx->timeout_list.prev;
5603 goto add;
5604 }
Jens Axboe5262f562019-09-17 12:26:57 -06005605
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005606 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5607 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005608
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005609 /* Update the last seq here in case io_flush_timeouts() hasn't.
5610 * This is safe because ->completion_lock is held, and submissions
5611 * and completions are never mixed in the same ->completion_lock section.
5612 */
5613 ctx->cq_last_tm_flush = tail;
5614
Jens Axboe5262f562019-09-17 12:26:57 -06005615 /*
5616 * Insertion sort, ensuring the first entry in the list is always
5617 * the one we need first.
5618 */
Jens Axboe5262f562019-09-17 12:26:57 -06005619 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005620 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5621 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005622
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005623 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005624 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005625 /* nxt.seq is behind @tail, otherwise would've been completed */
5626 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005627 break;
5628 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005629add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005630 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005631 data->timer.function = io_timeout_fn;
5632 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005633 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005634 return 0;
5635}
5636
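/*
 * Userspace sketch (not part of this file): sqe->off is the "count"
 * argument of liburing's io_uring_prep_timeout() (prototype assumed).
 * With count == 0 the request is a pure relative timeout; with count == N
 * it also completes once N other CQEs have been posted, which is the
 * target_seq bookkeeping above.
 */
#include <liburing.h>

static void arm_timeout(struct io_uring *ring)
{
	static struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* fires after 1s, or earlier once 8 completions have been posted */
	io_uring_prep_timeout(sqe, &ts, 8, 0);
	sqe->user_data = 0x7180;
	io_uring_submit(ring);
	/*
	 * CQE res is -ETIME when the timer fires; when the completion count
	 * triggers first, the timeout is flushed with a 0 result instead.
	 */
}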
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005637struct io_cancel_data {
5638 struct io_ring_ctx *ctx;
5639 u64 user_data;
5640};
5641
Jens Axboe62755e32019-10-28 21:49:21 -06005642static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005643{
Jens Axboe62755e32019-10-28 21:49:21 -06005644 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005645 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005646
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005647 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005648}
5649
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005650static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5651 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005652{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005653 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005654 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005655 int ret = 0;
5656
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005657 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005658 return -ENOENT;
5659
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005660 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005661 switch (cancel_ret) {
5662 case IO_WQ_CANCEL_OK:
5663 ret = 0;
5664 break;
5665 case IO_WQ_CANCEL_RUNNING:
5666 ret = -EALREADY;
5667 break;
5668 case IO_WQ_CANCEL_NOTFOUND:
5669 ret = -ENOENT;
5670 break;
5671 }
5672
Jens Axboee977d6d2019-11-05 12:39:45 -07005673 return ret;
5674}
5675
Jens Axboe47f46762019-11-09 17:43:02 -07005676static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5677 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005678 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005679{
5680 unsigned long flags;
5681 int ret;
5682
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005683 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005684 if (ret != -ENOENT) {
5685 spin_lock_irqsave(&ctx->completion_lock, flags);
5686 goto done;
5687 }
5688
5689 spin_lock_irqsave(&ctx->completion_lock, flags);
5690 ret = io_timeout_cancel(ctx, sqe_addr);
5691 if (ret != -ENOENT)
5692 goto done;
5693 ret = io_poll_cancel(ctx, sqe_addr);
5694done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005695 if (!ret)
5696 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005697 io_cqring_fill_event(req, ret);
5698 io_commit_cqring(ctx);
5699 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5700 io_cqring_ev_posted(ctx);
5701
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005702 if (ret < 0)
5703 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005704 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005705}
5706
Jens Axboe3529d8c2019-12-19 18:24:38 -07005707static int io_async_cancel_prep(struct io_kiocb *req,
5708 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005709{
Jens Axboefbf23842019-12-17 18:45:56 -07005710 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005711 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005712 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5713 return -EINVAL;
5714 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005715 return -EINVAL;
5716
Jens Axboefbf23842019-12-17 18:45:56 -07005717 req->cancel.addr = READ_ONCE(sqe->addr);
5718 return 0;
5719}
5720
Pavel Begunkov61e98202021-02-10 00:03:08 +00005721static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005722{
5723 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005724 u64 sqe_addr = req->cancel.addr;
5725 struct io_tctx_node *node;
5726 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005727
Pavel Begunkov58f99372021-03-12 16:25:55 +00005728 /* tasks should wait for their io-wq threads, so safe w/o sync */
5729 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5730 spin_lock_irq(&ctx->completion_lock);
5731 if (ret != -ENOENT)
5732 goto done;
5733 ret = io_timeout_cancel(ctx, sqe_addr);
5734 if (ret != -ENOENT)
5735 goto done;
5736 ret = io_poll_cancel(ctx, sqe_addr);
5737 if (ret != -ENOENT)
5738 goto done;
5739 spin_unlock_irq(&ctx->completion_lock);
5740
5741 /* slow path, try all io-wq's */
5742 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5743 ret = -ENOENT;
5744 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5745 struct io_uring_task *tctx = node->task->io_uring;
5746
5747 if (!tctx || !tctx->io_wq)
5748 continue;
5749 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5750 if (ret != -ENOENT)
5751 break;
5752 }
5753 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5754
5755 spin_lock_irq(&ctx->completion_lock);
5756done:
5757 io_cqring_fill_event(req, ret);
5758 io_commit_cqring(ctx);
5759 spin_unlock_irq(&ctx->completion_lock);
5760 io_cqring_ev_posted(ctx);
5761
5762 if (ret < 0)
5763 req_set_fail_links(req);
5764 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005765 return 0;
5766}
5767
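/*
 * Userspace sketch (not part of this file): IORING_OP_ASYNC_CANCEL as
 * driven from liburing. The cancel request keys off the target's
 * user_data, walking the same io-wq -> timeout -> poll chain as
 * io_async_cancel() above. Older liburing takes the key as a pointer;
 * treat the prototype as an assumption.
 */
#include <liburing.h>

static void cancel_by_key(struct io_uring *ring, unsigned long key)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_cancel(sqe, (void *)key, 0);
	sqe->user_data = ~key;	/* anything that identifies the cancel itself */
	io_uring_submit(ring);
	/*
	 * CQE res: 0 if cancelled, -ENOENT if no match was found, -EALREADY
	 * if the target was already running and could not be cancelled.
	 */
}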
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005768static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005769 const struct io_uring_sqe *sqe)
5770{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005771 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5772 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005773 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5774 return -EINVAL;
5775 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005776 return -EINVAL;
5777
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005778 req->rsrc_update.offset = READ_ONCE(sqe->off);
5779 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5780 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005781 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005782 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005783 return 0;
5784}
5785
Pavel Begunkov889fca72021-02-10 00:03:09 +00005786static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005787{
5788 struct io_ring_ctx *ctx = req->ctx;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005789 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005790 int ret;
5791
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005792 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005793 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005794
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005795 up.offset = req->rsrc_update.offset;
5796 up.data = req->rsrc_update.arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005797
5798 mutex_lock(&ctx->uring_lock);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005799 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005800 mutex_unlock(&ctx->uring_lock);
5801
5802 if (ret < 0)
5803 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005804 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005805 return 0;
5806}
5807
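/*
 * Userspace sketch (not part of this file): IORING_OP_FILES_UPDATE
 * replaces entries in a previously registered fixed-file table. sqe->addr
 * points at an array of fds, sqe->len is the count and sqe->off the table
 * offset, matching io_rsrc_update_prep() above. The liburing wrapper is
 * assumed; an fd of -1 clears a slot.
 */
#include <liburing.h>

static void swap_fixed_file(struct io_uring *ring, int new_fd, int slot)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	static int fds[1];

	fds[0] = new_fd;	/* use -1 to just unregister the slot */
	io_uring_prep_files_update(sqe, fds, 1, slot);
	io_uring_submit(ring);
	/* CQE res is the number of entries updated, or -errno */
}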
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005808static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005809{
Jens Axboed625c6e2019-12-17 19:53:05 -07005810 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005811 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005812 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005813 case IORING_OP_READV:
5814 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005815 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005816 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005817 case IORING_OP_WRITEV:
5818 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005819 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005820 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005821 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005822 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005823 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005824 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005825 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005826 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005827 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005828 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005829 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005830 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005831 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005832 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005833 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005834 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005835 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005836 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005837 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005838 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005839 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005840 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005841 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005842 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005843 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005844 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005845 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005846 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005847 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005848 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005849 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005850 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005851 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005852 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005853 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005854 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005855 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005856 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005857 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005858 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005859 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005860 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005861 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005862 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005863 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005864 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005865 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005866 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005867 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005868 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005869 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005870 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005871 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005872 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005873 case IORING_OP_SHUTDOWN:
5874 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005875 case IORING_OP_RENAMEAT:
5876 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005877 case IORING_OP_UNLINKAT:
5878 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005879 }
5880
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005881 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5882 req->opcode);
5883 return -EINVAL;
5884}
5885
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005886static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005887{
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005888 switch (req->opcode) {
5889 case IORING_OP_READV:
5890 case IORING_OP_READ_FIXED:
5891 case IORING_OP_READ:
5892 return io_rw_prep_async(req, READ);
5893 case IORING_OP_WRITEV:
5894 case IORING_OP_WRITE_FIXED:
5895 case IORING_OP_WRITE:
5896 return io_rw_prep_async(req, WRITE);
5897 case IORING_OP_SENDMSG:
5898 case IORING_OP_SEND:
5899 return io_sendmsg_prep_async(req);
5900 case IORING_OP_RECVMSG:
5901 case IORING_OP_RECV:
5902 return io_recvmsg_prep_async(req);
5903 case IORING_OP_CONNECT:
5904 return io_connect_prep_async(req);
5905 }
5906 return 0;
5907}
5908
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005909static int io_req_defer_prep(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005910{
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005911 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005912 return 0;
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005913 /* some opcodes init it during the initial prep */
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005914 if (req->async_data)
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005915 return 0;
5916 if (__io_alloc_async_data(req))
Jens Axboeb76da702019-11-20 13:05:32 -07005917 return -EAGAIN;
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005918 return io_req_prep_async(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005919}
5920
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005921static u32 io_get_sequence(struct io_kiocb *req)
5922{
5923 struct io_kiocb *pos;
5924 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005925 u32 total_submitted, nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005926
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005927 io_for_each_link(pos, req)
5928 nr_reqs++;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005929
5930 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5931 return total_submitted - nr_reqs;
5932}
5933
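/*
 * Worked example for io_get_sequence(): if 10 SQEs have been consumed
 * (cached_sq_head) with none dropped, and @req heads a 3-request link,
 * all three are already counted in the head, so the drain sequence is
 * roughly 10 - 3 = 7 -- the index of the link's first request. The
 * request is then deferred until 7 earlier submissions (plus any
 * overflowed completions) have been posted to the CQ ring.
 */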
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005934static int io_req_defer(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005935{
5936 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005937 struct io_defer_entry *de;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005938 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005939 u32 seq;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005940
5941 /* Still need to defer if there is a pending req in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005942 if (likely(list_empty_careful(&ctx->defer_list) &&
5943 !(req->flags & REQ_F_IO_DRAIN)))
5944 return 0;
5945
5946 seq = io_get_sequence(req);
5947 /* Still a chance to pass the sequence check */
5948 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboe2b188cc2019-01-07 10:46:33 -07005949 return 0;
5950
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005951 ret = io_req_defer_prep(req);
5952 if (ret)
5953 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03005954 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005955 de = kmalloc(sizeof(*de), GFP_KERNEL);
5956 if (!de)
5957 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07005958
5959 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005960 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07005961 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005962 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03005963 io_queue_async_work(req);
5964 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07005965 }
5966
5967 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005968 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005969 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005970 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07005971 spin_unlock_irq(&ctx->completion_lock);
5972 return -EIOCBQUEUED;
5973}
Jens Axboeedafcce2019-01-09 09:16:05 -07005974
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03005975static void __io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005976{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005977 if (req->flags & REQ_F_BUFFER_SELECTED) {
5978 switch (req->opcode) {
5979 case IORING_OP_READV:
5980 case IORING_OP_READ_FIXED:
5981 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005982 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005983 break;
5984 case IORING_OP_RECVMSG:
5985 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07005986 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005987 break;
5988 }
5989 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005990 }
5991
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005992 if (req->flags & REQ_F_NEED_CLEANUP) {
5993 switch (req->opcode) {
5994 case IORING_OP_READV:
5995 case IORING_OP_READ_FIXED:
5996 case IORING_OP_READ:
5997 case IORING_OP_WRITEV:
5998 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005999 case IORING_OP_WRITE: {
6000 struct io_async_rw *io = req->async_data;
6001			kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006003 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006004 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006005 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006006 case IORING_OP_SENDMSG: {
6007 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006008
6009 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006010 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006011 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006012 case IORING_OP_SPLICE:
6013 case IORING_OP_TEE:
6014 io_put_file(req, req->splice.file_in,
6015 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
6016 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006017 case IORING_OP_OPENAT:
6018 case IORING_OP_OPENAT2:
6019 if (req->open.filename)
6020 putname(req->open.filename);
6021 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006022 case IORING_OP_RENAMEAT:
6023 putname(req->rename.oldpath);
6024 putname(req->rename.newpath);
6025 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006026 case IORING_OP_UNLINKAT:
6027 putname(req->unlink.filename);
6028 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006029 }
6030 req->flags &= ~REQ_F_NEED_CLEANUP;
6031 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006032}
6033
Pavel Begunkov889fca72021-02-10 00:03:09 +00006034static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006035{
Jens Axboeedafcce2019-01-09 09:16:05 -07006036 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006037 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006038 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006039
Jens Axboe003e8dc2021-03-06 09:22:27 -07006040 if (req->work.creds && req->work.creds != current_cred())
6041 creds = override_creds(req->work.creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006042
Jens Axboed625c6e2019-12-17 19:53:05 -07006043 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006044 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006045 ret = io_nop(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006046 break;
6047 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006048 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006049 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006050 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006051 break;
6052 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006053 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006054 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006055 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006056 break;
6057 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006058 ret = io_fsync(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006059 break;
6060 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006061 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006062 break;
6063 case IORING_OP_POLL_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006064 ret = io_poll_remove(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006065 break;
6066 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006067 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006068 break;
6069 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006070 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006071 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006072 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006073 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006074 break;
6075 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006076 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006077 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006078 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006079 ret = io_recv(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006080 break;
6081 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006082 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006083 break;
6084 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006085 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006086 break;
6087 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006088 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006089 break;
6090 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006091 ret = io_connect(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006092 break;
6093 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006094 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006095 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006096 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006097 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006098 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006099 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006100 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006101 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006102 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006103 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006104 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006105 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006106 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006107 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006108 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006109 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006110 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006111 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006112 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006113 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006114 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006115 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006116 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006117 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006118 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006119 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006120 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006121 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006122 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006123 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006124 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006125 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006126 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006127 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006128 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006129 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006130 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006131 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006132 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006133 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006134 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006135 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006136 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006137 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006138 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006139 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006140 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006141 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006142 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006143 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006144 default:
6145 ret = -EINVAL;
6146 break;
Jens Axboe31b51512019-01-18 22:56:34 -07006147 }
6148
Jens Axboe5730b272021-02-27 15:57:30 -07006149 if (creds)
6150 revert_creds(creds);
6151
Jens Axboe2b188cc2019-01-07 10:46:33 -07006152 if (ret)
6153 return ret;
6154
Jens Axboeb5325762020-05-19 21:20:27 -06006155 /* If the op doesn't have a file, we're not polling for it */
6156 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006157 const bool in_async = io_wq_current_is_worker();
6158
Jens Axboe11ba8202020-01-15 21:51:17 -07006159 /* workqueue context doesn't hold uring_lock, grab it now */
6160 if (in_async)
6161 mutex_lock(&ctx->uring_lock);
6162
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08006163 io_iopoll_req_issued(req, in_async);
Jens Axboe11ba8202020-01-15 21:51:17 -07006164
6165 if (in_async)
6166 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006167 }
6168
6169 return 0;
6170}
6171
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006172static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006173{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006174 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006175 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006176 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006177
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006178 timeout = io_prep_linked_timeout(req);
6179 if (timeout)
6180 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006181
Jens Axboe4014d942021-01-19 15:53:54 -07006182 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006183 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006184
Jens Axboe561fb042019-10-24 07:25:42 -06006185 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006186 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006187 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006188 /*
6189 * We can get EAGAIN for polled IO even though we're
6190 * forcing a sync submission from here, since we can't
6191 * wait for request slots on the block side.
6192 */
6193 if (ret != -EAGAIN)
6194 break;
6195 cond_resched();
6196 } while (1);
6197 }
Jens Axboe31b51512019-01-18 22:56:34 -07006198
Pavel Begunkova3df76982021-02-18 22:32:52 +00006199 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006200 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006201 /* io-wq is going to take one down */
6202 refcount_inc(&req->refs);
6203 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006204 }
Jens Axboe31b51512019-01-18 22:56:34 -07006205}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006206
Jens Axboe65e19f52019-10-26 07:20:21 -06006207static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6208 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06006209{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006210 struct fixed_rsrc_table *table;
Jens Axboe65e19f52019-10-26 07:20:21 -06006211
Jens Axboe05f3fb32019-12-09 11:22:50 -07006212 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
Xiaoming Ni84695082020-05-11 19:25:43 +08006213 return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06006214}
6215
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006216static struct file *io_file_get(struct io_submit_state *state,
6217 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006218{
6219 struct io_ring_ctx *ctx = req->ctx;
6220 struct file *file;
6221
6222 if (fixed) {
Pavel Begunkov479f5172020-10-10 18:34:07 +01006223 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006224 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006225 fd = array_index_nospec(fd, ctx->nr_user_files);
6226 file = io_file_from_index(ctx, fd);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00006227 io_set_resource_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006228 } else {
6229 trace_io_uring_file_get(ctx, fd);
6230 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006231 }
6232
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00006233 if (file && unlikely(file->f_op == &io_uring_fops))
6234 io_req_track_inflight(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006235 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006236}
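
/*
 * Fixed-file usage sketch (userspace, illustrative): with a file table
 * registered up front, SQEs reference files by table index instead of by
 * file descriptor, avoiding the per-I/O fget/fput:
 *
 *	int fds[2] = { open("a", O_RDONLY), open("b", O_RDONLY) };
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, 2);
 *	...
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	sqe->fd = 1;	// index into the registered table, i.e. "b"
 */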
6237
Jens Axboe2665abf2019-11-05 12:40:47 -07006238static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6239{
Jens Axboead8a48a2019-11-15 08:49:11 -07006240 struct io_timeout_data *data = container_of(timer,
6241 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006242 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006243 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006244 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006245
6246 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006247 prev = req->timeout.head;
6248 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006249
6250 /*
6251	 * We don't expect the timeout head to be cleared already; that will
6252	 * only happen if we raced with completion of the linked work.
6253 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006254 if (prev && refcount_inc_not_zero(&prev->refs))
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006255 io_remove_next_linked(prev);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006256 else
6257 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006258 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6259
6260 if (prev) {
Pavel Begunkov014db002020-03-03 21:33:12 +03006261 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006262 io_put_req_deferred(prev, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006263 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006264 io_req_complete_post(req, -ETIME, 0);
6265 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07006266 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006267 return HRTIMER_NORESTART;
6268}
6269
Jens Axboe7271ef32020-08-10 09:55:22 -06006270static void __io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006271{
Jens Axboe76a46e02019-11-10 23:34:16 -07006272 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006273 * If the back reference is NULL, then our linked request finished
6274	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006275 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006276 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006277 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006278
Jens Axboead8a48a2019-11-15 08:49:11 -07006279 data->timer.function = io_link_timeout_fn;
6280 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6281 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006282 }
Jens Axboe7271ef32020-08-10 09:55:22 -06006283}
6284
6285static void io_queue_linked_timeout(struct io_kiocb *req)
6286{
6287 struct io_ring_ctx *ctx = req->ctx;
6288
6289 spin_lock_irq(&ctx->completion_lock);
6290 __io_queue_linked_timeout(req);
Jens Axboe76a46e02019-11-10 23:34:16 -07006291 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006292
Jens Axboe2665abf2019-11-05 12:40:47 -07006293 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006294 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006295}
6296
Jens Axboead8a48a2019-11-15 08:49:11 -07006297static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006298{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006299 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006300
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006301 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6302 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006303 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006304
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006305 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006306 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006307 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006308 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006309}
6310
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006311static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006312{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006313 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006314 int ret;
6315
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006316 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006317
6318 /*
6319 * We async punt it if the file wasn't marked NOWAIT, or if the file
6320 * doesn't support non-blocking read/write attempts
6321 */
Pavel Begunkov24c74672020-06-21 13:09:51 +03006322 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006323 if (!io_arm_poll_handler(req)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006324 /*
6325 * Queued up for async execution, worker will release
6326 * submit reference when the iocb is actually submitted.
6327 */
6328 io_queue_async_work(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006329 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006330 } else if (likely(!ret)) {
6331 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006332 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006333 struct io_ring_ctx *ctx = req->ctx;
6334 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006335
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006336 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006337 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006338 io_submit_flush_completions(cs, ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006339 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006340 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006341 }
6342 } else {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006343 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06006344 io_put_req(req);
Pavel Begunkov652532a2020-07-03 22:15:07 +03006345 io_req_complete(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006346 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006347 if (linked_timeout)
6348 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006349}
6350
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006351static void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006352{
6353 int ret;
6354
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006355 ret = io_req_defer(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006356 if (ret) {
6357 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006358fail_req:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006359 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006360 io_put_req(req);
6361 io_req_complete(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006362 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006363 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006364 ret = io_req_defer_prep(req);
6365 if (unlikely(ret))
6366 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07006367 io_queue_async_work(req);
6368 } else {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006369 __io_queue_sqe(req);
Jens Axboece35a472019-12-17 08:04:44 -07006370 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006371}
6372
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006373/*
6374 * Check SQE restrictions (opcode and flags).
6375 *
6376 * Returns 'true' if SQE is allowed, 'false' otherwise.
6377 */
6378static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6379 struct io_kiocb *req,
6380 unsigned int sqe_flags)
6381{
6382 if (!ctx->restricted)
6383 return true;
6384
6385 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6386 return false;
6387
6388 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6389 ctx->restrictions.sqe_flags_required)
6390 return false;
6391
6392 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6393 ctx->restrictions.sqe_flags_required))
6394 return false;
6395
6396 return true;
6397}
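
/*
 * Registration sketch (userspace, illustrative): a ring created with
 * IORING_SETUP_R_DISABLED can be locked down before it is enabled:
 *
 *	struct io_uring_restriction res[2] = {};
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
 *	res[1].sqe_flags = IOSQE_FIXED_FILE;
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);
 *	io_uring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * Afterwards only READV SQEs (optionally with IOSQE_FIXED_FILE) pass the
 * check above; anything else fails with -EACCES.
 */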
6398
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006399static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006400 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006401{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006402 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006403 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006404 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006405
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006406 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006407	/* same numerical values as the corresponding REQ_F_*, safe to copy */
6408 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006409 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006410 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006411 req->file = NULL;
6412 req->ctx = ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006413 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006414 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006415 /* one is dropped after submission, the other at completion */
6416 refcount_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006417 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006418 req->result = 0;
Jens Axboe93e68e02021-03-09 07:02:21 -07006419 req->work.list.next = NULL;
6420 req->work.creds = NULL;
6421 req->work.flags = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006422
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006423 /* enforce forwards compatibility on users */
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006424 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
6425 req->flags = 0;
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006426 return -EINVAL;
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006427 }
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006428
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006429 if (unlikely(req->opcode >= IORING_OP_LAST))
6430 return -EINVAL;
6431
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006432 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6433 return -EACCES;
6434
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006435 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6436 !io_op_defs[req->opcode].buffer_select)
6437 return -EOPNOTSUPP;
6438
Jens Axboe003e8dc2021-03-06 09:22:27 -07006439 personality = READ_ONCE(sqe->personality);
6440 if (personality) {
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00006441 req->work.creds = xa_load(&ctx->personalities, personality);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006442 if (!req->work.creds)
6443 return -EINVAL;
6444 get_cred(req->work.creds);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006445 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006446 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006447
Jens Axboe27926b62020-10-28 09:33:23 -06006448 /*
6449 * Plug now if we have more than 1 IO left after this, and the target
6450 * is potentially a read/write to block based storage.
6451 */
6452 if (!state->plug_started && state->ios_left > 1 &&
6453 io_op_defs[req->opcode].plug) {
6454 blk_start_plug(&state->plug);
6455 state->plug_started = true;
6456 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006457
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006458 if (io_op_defs[req->opcode].needs_file) {
6459 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006460
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006461 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006462 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006463 ret = -EBADF;
6464 }
6465
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006466 state->ios_left--;
6467 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006468}
6469
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006470static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006471 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006472{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006473 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006474 int ret;
6475
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006476 ret = io_init_req(ctx, req, sqe);
6477 if (unlikely(ret)) {
6478fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006479 if (link->head) {
6480 /* fail even hard links since we don't submit */
Pavel Begunkovcf109602021-02-18 18:29:43 +00006481 link->head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006482 io_put_req(link->head);
6483 io_req_complete(link->head, -ECANCELED);
6484 link->head = NULL;
6485 }
Pavel Begunkov90b87492021-03-25 19:05:14 +00006486 io_put_req(req);
6487 io_req_complete(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006488 return ret;
6489 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006490 ret = io_req_prep(req, sqe);
6491 if (unlikely(ret))
6492 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006493
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006494 /* don't need @sqe from now on */
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006495 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6496 true, ctx->flags & IORING_SETUP_SQPOLL);
6497
Jens Axboe6c271ce2019-01-10 11:22:30 -07006498 /*
6499 * If we already have a head request, queue this one for async
6500 * submittal once the head completes. If we don't have a head but
6501 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6502 * submitted sync once the chain is complete. If none of those
6503 * conditions are true (normal request), then just queue it.
6504 */
6505 if (link->head) {
6506 struct io_kiocb *head = link->head;
6507
6508 /*
6509		 * Given the sequential execution of a link, draining both sides
6510		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6511		 * requests in the link. So, it drains the head and the
6512		 * request following the link. The latter is done via the
6513		 * drain_next flag to persist the effect across calls.
6514 */
6515 if (req->flags & REQ_F_IO_DRAIN) {
6516 head->flags |= REQ_F_IO_DRAIN;
6517 ctx->drain_next = 1;
6518 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006519 ret = io_req_defer_prep(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006520 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006521 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006522 trace_io_uring_link(ctx, req, head);
6523 link->last->link = req;
6524 link->last = req;
6525
6526 /* last request of a link, enqueue the link */
6527 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006528 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006529 link->head = NULL;
6530 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006531 } else {
6532 if (unlikely(ctx->drain_next)) {
6533 req->flags |= REQ_F_IO_DRAIN;
6534 ctx->drain_next = 0;
6535 }
6536 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006537 link->head = req;
6538 link->last = req;
6539 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006540 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006541 }
6542 }
6543
6544 return 0;
6545}
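
/*
 * Link usage sketch (userspace via liburing, illustrative): queue a write
 * followed by an fsync that only starts once the write has completed; if
 * the write fails, the fsync completes with -ECANCELED:
 *
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	io_uring_submit(&ring);
 */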
6546
6547/*
6548 * Batched submission is done, ensure local IO is flushed out.
6549 */
6550static void io_submit_state_end(struct io_submit_state *state,
6551 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006552{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006553 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006554 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006555 if (state->comp.nr)
Jens Axboe9e645e112019-05-10 16:07:28 -06006556 io_submit_flush_completions(&state->comp, ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006557 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006558 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006559 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006560}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006561
Jens Axboe9e645e112019-05-10 16:07:28 -06006562/*
6563 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006564 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006565static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006566 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006567{
6568 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006569 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006570	/* set only head, no need to init link.last in advance */
6571 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006572}
6573
Jens Axboe193155c2020-02-22 23:22:19 -07006574static void io_commit_sqring(struct io_ring_ctx *ctx)
6575{
Jens Axboe75c6a032020-01-28 10:15:23 -07006576 struct io_rings *rings = ctx->rings;
6577
6578 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006579 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006580 * since once we write the new head, the application could
6581 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006582 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006583 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006584}
6585
Jens Axboe9e645e112019-05-10 16:07:28 -06006586/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006587 * Fetch an sqe, if one is available. Note that the returned sqe points to
Jens Axboe9e645e112019-05-10 16:07:28 -06006588 * memory mapped by userspace. This means that care needs to be taken to
6589 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006590 * being a good citizen. If members of the sqe are validated and then later
6591 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006592 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006593 */
6594static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006595{
6596 u32 *sq_array = ctx->sq_array;
6597 unsigned head;
6598
6599 /*
6600 * The cached sq head (or cq tail) serves two purposes:
6601 *
6602	 * 1) allows us to batch the cost of updating the user-visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006603	 * head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006604 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006605 * though the application is the one updating it.
6606 */
6607 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
6608 if (likely(head < ctx->sq_entries))
6609 return &ctx->sq_sqes[head];
6610
6611 /* drop invalid entries */
Pavel Begunkov711be032020-01-17 03:57:59 +03006612 ctx->cached_sq_dropped++;
6613 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6614 return NULL;
6615}
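
/*
 * Indexing sketch (illustrative): the SQ ring is an indirection array. With
 * sq_entries of 8 and cached_sq_head of 10, the kernel reads sq_array[10 & 7]
 * (the mask is sq_entries - 1) to find the index of the actual SQE within
 * ctx->sq_sqes[]. Userspace fills in an SQE, stores its index in
 * sq_array[tail & mask], and only then publishes the new tail.
 */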
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006616
Jens Axboe0f212202020-09-13 13:09:39 -06006617static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006618{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006619 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006620
Jens Axboec4a2ed72019-11-21 21:01:26 -07006621 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006622 if (test_bit(0, &ctx->sq_check_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006623 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006624 return -EBUSY;
6625 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006626
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006627 /* make sure SQ entry isn't read before tail */
6628 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006629
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006630 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6631 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006632
Jens Axboed8a6df12020-10-15 16:24:45 -06006633 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006634 refcount_add(nr, &current->usage);
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006635 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006636
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006637 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006638 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006639 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006640
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006641 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006642 if (unlikely(!req)) {
6643 if (!submitted)
6644 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006645 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006646 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006647 sqe = io_get_sqe(ctx);
6648 if (unlikely(!sqe)) {
6649 kmem_cache_free(req_cachep, req);
6650 break;
6651 }
Jens Axboed3656342019-12-18 09:50:26 -07006652 /* will complete beyond this point, count as submitted */
6653 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006654 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006655 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006656 }
6657
Pavel Begunkov9466f432020-01-25 22:34:01 +03006658 if (unlikely(submitted != nr)) {
6659 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006660 struct io_uring_task *tctx = current->io_uring;
6661 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006662
Jens Axboed8a6df12020-10-15 16:24:45 -06006663 percpu_ref_put_many(&ctx->refs, unused);
6664 percpu_counter_sub(&tctx->inflight, unused);
6665 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006666 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006667
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006668 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006669 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6670 io_commit_sqring(ctx);
6671
Jens Axboe6c271ce2019-01-10 11:22:30 -07006672 return submitted;
6673}
6674
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006675static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6676{
6677 /* Tell userspace we may need a wakeup call */
6678 spin_lock_irq(&ctx->completion_lock);
6679 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6680 spin_unlock_irq(&ctx->completion_lock);
6681}
6682
6683static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6684{
6685 spin_lock_irq(&ctx->completion_lock);
6686 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6687 spin_unlock_irq(&ctx->completion_lock);
6688}
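
/*
 * Userspace counterpart (sketch, illustrative): with IORING_SETUP_SQPOLL,
 * the application re-checks this flag after updating the SQ tail and only
 * enters the kernel when the poll thread has gone to sleep:
 *
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, to_submit, 0,
 *			       IORING_ENTER_SQ_WAKEUP, NULL);
 */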
6689
Xiaoguang Wang08369242020-11-03 14:15:59 +08006690static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006691{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006692 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006693 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006694
Jens Axboec8d1ba52020-09-14 11:07:26 -06006695 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006696 /* if we're handling multiple rings, cap submit size for fairness */
6697 if (cap_entries && to_submit > 8)
6698 to_submit = 8;
6699
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006700 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6701 unsigned nr_events = 0;
6702
Xiaoguang Wang08369242020-11-03 14:15:59 +08006703 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006704 if (!list_empty(&ctx->iopoll_list))
6705 io_do_iopoll(ctx, &nr_events, 0);
6706
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006707 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6708 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006709 ret = io_submit_sqes(ctx, to_submit);
6710 mutex_unlock(&ctx->uring_lock);
6711 }
Jens Axboe90554202020-09-03 12:12:41 -06006712
6713 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6714 wake_up(&ctx->sqo_sq_wait);
6715
Xiaoguang Wang08369242020-11-03 14:15:59 +08006716 return ret;
6717}
6718
6719static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6720{
6721 struct io_ring_ctx *ctx;
6722 unsigned sq_thread_idle = 0;
6723
6724 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6725 if (sq_thread_idle < ctx->sq_thread_idle)
6726 sq_thread_idle = ctx->sq_thread_idle;
6727 }
6728
6729 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006730}
6731
Jens Axboe6c271ce2019-01-10 11:22:30 -07006732static int io_sq_thread(void *data)
6733{
Jens Axboe69fb2132020-09-14 11:16:23 -06006734 struct io_sq_data *sqd = data;
6735 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006736 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006737 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006738 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006739
Pavel Begunkov696ee882021-04-01 09:55:04 +01006740 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006741 set_task_comm(current, buf);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006742 current->pf_io_worker = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006743
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006744 if (sqd->sq_cpu != -1)
6745 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6746 else
6747 set_cpus_allowed_ptr(current, cpu_online_mask);
6748 current->flags |= PF_NO_SETAFFINITY;
6749
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006750 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07006751 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006752 int ret;
6753 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006754
Jens Axboe82734c52021-03-29 06:52:44 -06006755 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6756 signal_pending(current)) {
6757 bool did_sig = false;
6758
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006759 mutex_unlock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006760 if (signal_pending(current)) {
6761 struct ksignal ksig;
6762
6763 did_sig = get_signal(&ksig);
6764 }
Jens Axboe05962f92021-03-06 13:58:48 -07006765 cond_resched();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006766 mutex_lock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006767 if (did_sig)
6768 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006769 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006770 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006771 timeout = jiffies + sqd->sq_thread_idle;
Pavel Begunkov7d41e852021-03-10 13:13:54 +00006772 continue;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006773 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006774 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06006775 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006776 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006777 const struct cred *creds = NULL;
6778
6779 if (ctx->sq_creds != current_cred())
6780 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006781 ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006782 if (creds)
6783 revert_creds(creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006784 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6785 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006786 }
6787
Xiaoguang Wang08369242020-11-03 14:15:59 +08006788 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006789 io_run_task_work();
6790 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006791 if (sqt_spin)
6792 timeout = jiffies + sqd->sq_thread_idle;
6793 continue;
6794 }
6795
Xiaoguang Wang08369242020-11-03 14:15:59 +08006796 needs_sched = true;
6797 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6798 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6799 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6800 !list_empty_careful(&ctx->iopoll_list)) {
6801 needs_sched = false;
6802 break;
6803 }
6804 if (io_sqring_entries(ctx)) {
6805 needs_sched = false;
6806 break;
6807 }
6808 }
6809
Jens Axboe05962f92021-03-06 13:58:48 -07006810 if (needs_sched && !test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
Jens Axboe69fb2132020-09-14 11:16:23 -06006811 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6812 io_ring_set_wakeup_flag(ctx);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006813
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006814 mutex_unlock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006815 schedule();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006816 mutex_lock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006817 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6818 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006819 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006820
6821 finish_wait(&sqd->wait, &wait);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006822 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006823 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006824 }
6825
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006826 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6827 io_uring_cancel_sqpoll(ctx);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006828 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006829 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006830 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006831 mutex_unlock(&sqd->lock);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006832
6833 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006834 io_run_task_work_head(&sqd->park_task_work);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006835 complete(&sqd->exited);
6836 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006837}
6838
Jens Axboebda52162019-09-24 13:47:15 -06006839struct io_wait_queue {
6840 struct wait_queue_entry wq;
6841 struct io_ring_ctx *ctx;
6842 unsigned to_wait;
6843 unsigned nr_timeouts;
6844};
6845
Pavel Begunkov6c503152021-01-04 20:36:36 +00006846static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006847{
6848 struct io_ring_ctx *ctx = iowq->ctx;
6849
6850 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006851 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006852 * started waiting. For timeouts, we always want to return to userspace,
6853 * regardless of event count.
6854 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006855 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006856 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6857}
6858
6859static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6860 int wake_flags, void *key)
6861{
6862 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6863 wq);
6864
Pavel Begunkov6c503152021-01-04 20:36:36 +00006865 /*
6866	 * Cannot safely flush overflowed CQEs from here; ensure we wake up
6867 * the task, and the next invocation will do it.
6868 */
6869 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6870 return autoremove_wake_function(curr, mode, wake_flags, key);
6871 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006872}
6873
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006874static int io_run_task_work_sig(void)
6875{
6876 if (io_run_task_work())
6877 return 1;
6878 if (!signal_pending(current))
6879 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06006880 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06006881 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006882 return -EINTR;
6883}
6884
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006885/* when this returns >0, the caller should retry */
6886static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6887 struct io_wait_queue *iowq,
6888 signed long *timeout)
6889{
6890 int ret;
6891
6892 /* make sure we run task_work before checking for signals */
6893 ret = io_run_task_work_sig();
6894 if (ret || io_should_wake(iowq))
6895 return ret;
6896 /* let the caller flush overflows, retry */
6897 if (test_bit(0, &ctx->cq_check_overflow))
6898 return 1;
6899
6900 *timeout = schedule_timeout(*timeout);
6901 return !*timeout ? -ETIME : 1;
6902}
6903
Jens Axboe2b188cc2019-01-07 10:46:33 -07006904/*
6905 * Wait until events become available, if we don't already have some. The
6906 * application must reap them itself, as they reside on the shared cq ring.
6907 */
6908static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08006909 const sigset_t __user *sig, size_t sigsz,
6910 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006911{
Jens Axboebda52162019-09-24 13:47:15 -06006912 struct io_wait_queue iowq = {
6913 .wq = {
6914 .private = current,
6915 .func = io_wake_function,
6916 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6917 },
6918 .ctx = ctx,
6919 .to_wait = min_events,
6920 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006921 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006922 signed long timeout = MAX_SCHEDULE_TIMEOUT;
6923 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006924
Jens Axboeb41e9852020-02-17 09:52:41 -07006925 do {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006926 io_cqring_overflow_flush(ctx, false, NULL, NULL);
6927 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07006928 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06006929 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07006930 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07006931 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006932
6933 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006934#ifdef CONFIG_COMPAT
6935 if (in_compat_syscall())
6936 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006937 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006938 else
6939#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006940 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006941
Jens Axboe2b188cc2019-01-07 10:46:33 -07006942 if (ret)
6943 return ret;
6944 }
6945
Hao Xuc73ebb62020-11-03 10:54:37 +08006946 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006947 struct timespec64 ts;
6948
Hao Xuc73ebb62020-11-03 10:54:37 +08006949 if (get_timespec64(&ts, uts))
6950 return -EFAULT;
6951 timeout = timespec64_to_jiffies(&ts);
6952 }
6953
Jens Axboebda52162019-09-24 13:47:15 -06006954 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006955 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006956 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07006957 /* if we can't even flush overflow, don't wait for more */
6958 if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
6959 ret = -EBUSY;
6960 break;
6961 }
Jens Axboebda52162019-09-24 13:47:15 -06006962 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6963 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006964 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
6965 finish_wait(&ctx->wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07006966 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006967 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06006968
Jens Axboeb7db41c2020-07-04 08:55:50 -06006969 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006970
Hristo Venev75b28af2019-08-26 17:23:46 +00006971 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006972}
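
/*
 * Reaping sketch (userspace, illustrative; liburing wraps this in
 * io_uring_wait_cqe() and io_uring_cqe_seen(); handle_completion() is a
 * placeholder for application code):
 *
 *	unsigned head = *cq_head;
 *
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle_completion(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 */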
6973
Jens Axboe6b063142019-01-10 22:13:58 -07006974static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6975{
6976#if defined(CONFIG_UNIX)
6977 if (ctx->ring_sock) {
6978 struct sock *sock = ctx->ring_sock->sk;
6979 struct sk_buff *skb;
6980
6981 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6982 kfree_skb(skb);
6983 }
6984#else
6985 int i;
6986
Jens Axboe65e19f52019-10-26 07:20:21 -06006987 for (i = 0; i < ctx->nr_user_files; i++) {
6988 struct file *file;
6989
6990 file = io_file_from_index(ctx, i);
6991 if (file)
6992 fput(file);
6993 }
Jens Axboe6b063142019-01-10 22:13:58 -07006994#endif
6995}
6996
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00006997static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006998{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006999 struct fixed_rsrc_data *data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007000
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007001 data = container_of(ref, struct fixed_rsrc_data, refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007002 complete(&data->done);
7003}
7004
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007005static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007006{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007007 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00007008}
7009
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007010static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07007011{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007012 spin_unlock_bh(&ctx->rsrc_ref_lock);
7013}
7014
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007015static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
7016 struct fixed_rsrc_data *rsrc_data,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007017 struct fixed_rsrc_ref_node *ref_node)
Jens Axboe6b063142019-01-10 22:13:58 -07007018{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007019 io_rsrc_ref_lock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007020 rsrc_data->node = ref_node;
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007021 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007022 io_rsrc_ref_unlock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007023 percpu_ref_get(&rsrc_data->refs);
Jens Axboe6b063142019-01-10 22:13:58 -07007024}
7025
Hao Xu8bad28d2021-02-19 17:19:36 +08007026static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_data *data)
Jens Axboe6b063142019-01-10 22:13:58 -07007027{
Hao Xu8bad28d2021-02-19 17:19:36 +08007028 struct fixed_rsrc_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06007029
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007030 io_rsrc_ref_lock(ctx);
Pavel Begunkov1e5d7702020-11-18 14:56:25 +00007031 ref_node = data->node;
Pavel Begunkove6cb0072021-02-20 18:03:47 +00007032 data->node = NULL;
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007033 io_rsrc_ref_unlock(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007034 if (ref_node)
7035 percpu_ref_kill(&ref_node->refs);
Hao Xu8bad28d2021-02-19 17:19:36 +08007036}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007037
Hao Xu8bad28d2021-02-19 17:19:36 +08007038static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
7039 struct io_ring_ctx *ctx,
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007040 void (*rsrc_put)(struct io_ring_ctx *ctx,
7041 struct io_rsrc_put *prsrc))
Hao Xu8bad28d2021-02-19 17:19:36 +08007042{
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007043 struct fixed_rsrc_ref_node *backup_node;
Hao Xu8bad28d2021-02-19 17:19:36 +08007044 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007045
Hao Xu8bad28d2021-02-19 17:19:36 +08007046 if (data->quiesce)
7047 return -ENXIO;
7048
7049 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007050 do {
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007051 ret = -ENOMEM;
7052 backup_node = alloc_fixed_rsrc_ref_node(ctx);
7053 if (!backup_node)
7054 break;
7055 backup_node->rsrc_data = data;
7056 backup_node->rsrc_put = rsrc_put;
7057
Hao Xu8bad28d2021-02-19 17:19:36 +08007058 io_sqe_rsrc_kill_node(ctx, data);
7059 percpu_ref_kill(&data->refs);
7060 flush_delayed_work(&ctx->rsrc_put_work);
7061
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007062 ret = wait_for_completion_interruptible(&data->done);
7063 if (!ret)
7064 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007065
Jens Axboecb5e1b82021-02-25 07:37:35 -07007066 percpu_ref_resurrect(&data->refs);
Hao Xu8bad28d2021-02-19 17:19:36 +08007067 io_sqe_rsrc_set_node(ctx, data, backup_node);
7068 backup_node = NULL;
Jens Axboecb5e1b82021-02-25 07:37:35 -07007069 reinit_completion(&data->done);
Hao Xu8bad28d2021-02-19 17:19:36 +08007070 mutex_unlock(&ctx->uring_lock);
7071 ret = io_run_task_work_sig();
7072 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007073 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007074 data->quiesce = false;
7075
7076 if (backup_node)
7077 destroy_fixed_rsrc_ref_node(backup_node);
7078 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007079}
7080
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007081static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7082{
7083 struct fixed_rsrc_data *data;
7084
7085 data = kzalloc(sizeof(*data), GFP_KERNEL);
7086 if (!data)
7087 return NULL;
7088
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007089 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007090 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7091 kfree(data);
7092 return NULL;
7093 }
7094 data->ctx = ctx;
7095 init_completion(&data->done);
7096 return data;
7097}
7098
7099static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7100{
7101 percpu_ref_exit(&data->refs);
7102 kfree(data->table);
7103 kfree(data);
7104}
7105
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007106static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7107{
7108 struct fixed_rsrc_data *data = ctx->file_data;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007109 unsigned nr_tables, i;
7110 int ret;
7111
Hao Xu8bad28d2021-02-19 17:19:36 +08007112 /*
7113	 * Use percpu_ref_is_dying() to stop parallel file unregistration,
7114	 * since we may drop the uring lock later in this function to
7115	 * run task work.
7116 */
7117 if (!data || percpu_ref_is_dying(&data->refs))
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007118 return -ENXIO;
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007119 ret = io_rsrc_ref_quiesce(data, ctx, io_ring_file_put);
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007120 if (ret)
7121 return ret;
7122
Jens Axboe6b063142019-01-10 22:13:58 -07007123 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06007124 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7125 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007126 kfree(data->table[i].files);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007127 free_fixed_rsrc_data(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007128 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007129 ctx->nr_user_files = 0;
7130 return 0;
7131}
7132
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007133static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007134 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007135{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007136 WARN_ON_ONCE(sqd->thread == current);
7137
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007138 /*
7139	 * Do the clear-then-reset dance instead of a conditional clear_bit(),
7140	 * which would race with other threads incrementing park_pending and setting the bit.
7141 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007142 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007143 if (atomic_dec_return(&sqd->park_pending))
7144 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007145 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007146}
7147
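/*
 * Editor's note: park the SQPOLL thread. Bump park_pending, set
 * IO_SQ_THREAD_SHOULD_PARK, take sqd->lock and kick the thread; the
 * caller may then mutate sqd state knowing the thread is out of its
 * main loop. Pairs with io_sq_thread_unpark() above.
 */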
Jens Axboe86e0d672021-03-05 08:44:39 -07007148static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007149 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007150{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007151 WARN_ON_ONCE(sqd->thread == current);
7152
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007153 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007154 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007155 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007156 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007157 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007158}
7159
7160static void io_sq_thread_stop(struct io_sq_data *sqd)
7161{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007162 WARN_ON_ONCE(sqd->thread == current);
7163
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007164 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007165 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Jens Axboee8f98f242021-03-09 16:32:13 -07007166 if (sqd->thread)
7167 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007168 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007169 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007170}
7171
Jens Axboe534ca6d2020-09-02 13:52:19 -06007172static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007173{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007174 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007175 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7176
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007177 io_sq_thread_stop(sqd);
7178 kfree(sqd);
7179 }
7180}
7181
7182static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7183{
7184 struct io_sq_data *sqd = ctx->sq_data;
7185
7186 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007187 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007188 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007189 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007190 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007191
7192 io_put_sq_data(sqd);
7193 ctx->sq_data = NULL;
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007194 if (ctx->sq_creds)
7195 put_cred(ctx->sq_creds);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007196 }
7197}
7198
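/*
 * Editor's note: IORING_SETUP_ATTACH_WQ support. Look up the io_sq_data
 * behind an existing ring fd and take a reference on it, so several rings
 * can be driven by one SQPOLL thread. Attaching is restricted to rings
 * created by the same thread group (see the task_tgid check).
 */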
Jens Axboeaa061652020-09-02 14:50:27 -06007199static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7200{
7201 struct io_ring_ctx *ctx_attach;
7202 struct io_sq_data *sqd;
7203 struct fd f;
7204
7205 f = fdget(p->wq_fd);
7206 if (!f.file)
7207 return ERR_PTR(-ENXIO);
7208 if (f.file->f_op != &io_uring_fops) {
7209 fdput(f);
7210 return ERR_PTR(-EINVAL);
7211 }
7212
7213 ctx_attach = f.file->private_data;
7214 sqd = ctx_attach->sq_data;
7215 if (!sqd) {
7216 fdput(f);
7217 return ERR_PTR(-EINVAL);
7218 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007219 if (sqd->task_tgid != current->tgid) {
7220 fdput(f);
7221 return ERR_PTR(-EPERM);
7222 }
Jens Axboeaa061652020-09-02 14:50:27 -06007223
7224 refcount_inc(&sqd->refs);
7225 fdput(f);
7226 return sqd;
7227}
7228
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007229static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7230 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007231{
7232 struct io_sq_data *sqd;
7233
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007234 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007235 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7236 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007237 if (!IS_ERR(sqd)) {
7238 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007239 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007240 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007241 /* fall through for EPERM case, setup new sqd/task */
7242 if (PTR_ERR(sqd) != -EPERM)
7243 return sqd;
7244 }
Jens Axboeaa061652020-09-02 14:50:27 -06007245
Jens Axboe534ca6d2020-09-02 13:52:19 -06007246 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7247 if (!sqd)
7248 return ERR_PTR(-ENOMEM);
7249
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007250 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007251 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007252 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007253 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007254 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007255 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007256 return sqd;
7257}
7258
Jens Axboe6b063142019-01-10 22:13:58 -07007259#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007260/*
7261 * Ensure the UNIX gc is aware of our file set, so we are certain that
7262	 * the io_uring can be safely unregistered on process exit, even if we
7263	 * have reference loops among the registered files.
7264 */
7265static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7266{
7267 struct sock *sk = ctx->ring_sock->sk;
7268 struct scm_fp_list *fpl;
7269 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007270 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007271
Jens Axboe6b063142019-01-10 22:13:58 -07007272 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7273 if (!fpl)
7274 return -ENOMEM;
7275
7276 skb = alloc_skb(0, GFP_KERNEL);
7277 if (!skb) {
7278 kfree(fpl);
7279 return -ENOMEM;
7280 }
7281
7282 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007283
Jens Axboe08a45172019-10-03 08:11:03 -06007284 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007285 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007286 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007287 struct file *file = io_file_from_index(ctx, i + offset);
7288
7289 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007290 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007291 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007292 unix_inflight(fpl->user, fpl->fp[nr_files]);
7293 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007294 }
7295
Jens Axboe08a45172019-10-03 08:11:03 -06007296 if (nr_files) {
7297 fpl->max = SCM_MAX_FD;
7298 fpl->count = nr_files;
7299 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007300 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007301 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7302 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007303
Jens Axboe08a45172019-10-03 08:11:03 -06007304 for (i = 0; i < nr_files; i++)
7305 fput(fpl->fp[i]);
7306 } else {
7307 kfree_skb(skb);
7308 kfree(fpl);
7309 }
Jens Axboe6b063142019-01-10 22:13:58 -07007310
7311 return 0;
7312}
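
/*
 * Editor's note: SCM_MAX_FD is 253, so io_sqe_files_scm() below chunks a
 * larger set into multiple skbs; e.g. 600 registered files would be sent
 * as three skbs holding 253, 253 and 94 files.
 */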
7313
7314/*
7315 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7316 * causes regular reference counting to break down. We rely on the UNIX
7317 * garbage collection to take care of this problem for us.
7318 */
7319static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7320{
7321 unsigned left, total;
7322 int ret = 0;
7323
7324 total = 0;
7325 left = ctx->nr_user_files;
7326 while (left) {
7327 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007328
7329 ret = __io_sqe_files_scm(ctx, this_files, total);
7330 if (ret)
7331 break;
7332 left -= this_files;
7333 total += this_files;
7334 }
7335
7336 if (!ret)
7337 return 0;
7338
7339 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007340 struct file *file = io_file_from_index(ctx, total);
7341
7342 if (file)
7343 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007344 total++;
7345 }
7346
7347 return ret;
7348}
7349#else
7350static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7351{
7352 return 0;
7353}
7354#endif
7355
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007356static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007357 unsigned nr_tables, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007358{
7359 int i;
7360
7361 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007362 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007363 unsigned this_files;
7364
7365 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7366 table->files = kcalloc(this_files, sizeof(struct file *),
7367 GFP_KERNEL);
7368 if (!table->files)
7369 break;
7370 nr_files -= this_files;
7371 }
7372
7373 if (i == nr_tables)
7374 return 0;
7375
7376 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007377 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007378 kfree(table->files);
7379 }
7380 return 1;
7381}
7382
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007383static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007384{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007385 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007386#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007387 struct sock *sock = ctx->ring_sock->sk;
7388 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7389 struct sk_buff *skb;
7390 int i;
7391
7392 __skb_queue_head_init(&list);
7393
7394 /*
7395 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7396 * remove this entry and rearrange the file array.
7397 */
7398 skb = skb_dequeue(head);
7399 while (skb) {
7400 struct scm_fp_list *fp;
7401
7402 fp = UNIXCB(skb).fp;
7403 for (i = 0; i < fp->count; i++) {
7404 int left;
7405
7406 if (fp->fp[i] != file)
7407 continue;
7408
7409 unix_notinflight(fp->user, fp->fp[i]);
7410 left = fp->count - 1 - i;
7411 if (left) {
7412 memmove(&fp->fp[i], &fp->fp[i + 1],
7413 left * sizeof(struct file *));
7414 }
7415 fp->count--;
7416 if (!fp->count) {
7417 kfree_skb(skb);
7418 skb = NULL;
7419 } else {
7420 __skb_queue_tail(&list, skb);
7421 }
7422 fput(file);
7423 file = NULL;
7424 break;
7425 }
7426
7427 if (!file)
7428 break;
7429
7430 __skb_queue_tail(&list, skb);
7431
7432 skb = skb_dequeue(head);
7433 }
7434
7435 if (skb_peek(&list)) {
7436 spin_lock_irq(&head->lock);
7437 while ((skb = __skb_dequeue(&list)) != NULL)
7438 __skb_queue_tail(head, skb);
7439 spin_unlock_irq(&head->lock);
7440 }
7441#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007442 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007443#endif
7444}
7445
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007446static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007447{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007448 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7449 struct io_ring_ctx *ctx = rsrc_data->ctx;
7450 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007451
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007452 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7453 list_del(&prsrc->list);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007454 ref_node->rsrc_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007455 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007456 }
7457
Xiaoguang Wang05589552020-03-31 14:05:18 +08007458 percpu_ref_exit(&ref_node->refs);
7459 kfree(ref_node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007460 percpu_ref_put(&rsrc_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007461}
7462
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007463static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007464{
7465 struct io_ring_ctx *ctx;
7466 struct llist_node *node;
7467
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007468 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7469 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007470
7471 while (node) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007472 struct fixed_rsrc_ref_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007473 struct llist_node *next = node->next;
7474
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007475 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7476 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007477 node = next;
7478 }
7479}
7480
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007481static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
7482 unsigned i)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007483{
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007484 struct fixed_rsrc_table *table;
7485
7486 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7487 return &table->files[i & IORING_FILE_TABLE_MASK];
7488}
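
/*
 * Editor's sketch, assuming the in-tree IORING_FILE_TABLE_SHIFT of 9
 * (512 slots per table): fixed file index 1000 resolves to
 * table[1000 >> 9] == table[1], slot 1000 & 511 == 488.
 */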
7489
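/*
 * Editor's note: called when a fixed rsrc node's refs hit zero. Nodes
 * must be flushed in the order they were switched out, so mark this one
 * done and only splice the leading run of completed nodes onto the
 * rsrc_put llist, then kick rsrc_put_work (immediately if the whole
 * data set is dying, otherwise after a delay to batch frees).
 */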
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007490static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007491{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007492 struct fixed_rsrc_ref_node *ref_node;
7493 struct fixed_rsrc_data *data;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007494 struct io_ring_ctx *ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007495 bool first_add = false;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007496 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007497
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007498 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7499 data = ref_node->rsrc_data;
Pavel Begunkove2978222020-11-18 14:56:26 +00007500 ctx = data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007501
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007502 io_rsrc_ref_lock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007503 ref_node->done = true;
7504
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007505 while (!list_empty(&ctx->rsrc_ref_list)) {
7506 ref_node = list_first_entry(&ctx->rsrc_ref_list,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007507 struct fixed_rsrc_ref_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007508 /* recycle ref nodes in order */
7509 if (!ref_node->done)
7510 break;
7511 list_del(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007512 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007513 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007514 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007515
7516 if (percpu_ref_is_dying(&data->refs))
Jens Axboe4a38aed22020-05-14 17:21:15 -06007517 delay = 0;
7518
Jens Axboe4a38aed22020-05-14 17:21:15 -06007519 if (!delay)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007520 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007521 else if (first_add)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007522 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007523}
7524
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007525static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
Xiaoguang Wang05589552020-03-31 14:05:18 +08007526 struct io_ring_ctx *ctx)
7527{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007528 struct fixed_rsrc_ref_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007529
7530 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7531 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007532 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007533
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007534 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007535 0, GFP_KERNEL)) {
7536 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007537 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007538 }
7539 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007540 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007541 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007542 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007543}
7544
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007545static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7546 struct fixed_rsrc_ref_node *ref_node)
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007547{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007548 ref_node->rsrc_data = ctx->file_data;
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007549 ref_node->rsrc_put = io_ring_file_put;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007550}
7551
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007552static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007553{
7554 percpu_ref_exit(&ref_node->refs);
7555 kfree(ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007556}
7557
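/*
 * Editor's note: IORING_REGISTER_FILES. Grab a reference to each user fd
 * as the ring's fixed file set, hand the files to the UNIX gc via
 * io_sqe_files_scm(), and install the first rsrc ref node that will
 * track later updates and removals.
 */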
Jens Axboe05f3fb32019-12-09 11:22:50 -07007559static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7560 unsigned nr_args)
7561{
7562 __s32 __user *fds = (__s32 __user *) arg;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007563 unsigned nr_tables, i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007564 struct file *file;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007565 int fd, ret = -ENOMEM;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007566 struct fixed_rsrc_ref_node *ref_node;
7567 struct fixed_rsrc_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007568
7569 if (ctx->file_data)
7570 return -EBUSY;
7571 if (!nr_args)
7572 return -EINVAL;
7573 if (nr_args > IORING_MAX_FIXED_FILES)
7574 return -EMFILE;
7575
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007576 file_data = alloc_fixed_rsrc_data(ctx);
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007577 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007578 return -ENOMEM;
Dan Carpenter13770a72021-02-01 15:23:42 +03007579 ctx->file_data = file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007580
7581 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
Colin Ian King035fbaf2020-10-12 15:03:41 +01007582 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007583 GFP_KERNEL);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007584 if (!file_data->table)
7585 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007586
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007587 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
Jens Axboe05f3fb32019-12-09 11:22:50 -07007588 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007589
Jens Axboe05f3fb32019-12-09 11:22:50 -07007590 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007591 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7592 ret = -EFAULT;
7593 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007594 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007595 /* allow sparse sets */
7596 if (fd == -1)
7597 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007598
Jens Axboe05f3fb32019-12-09 11:22:50 -07007599 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007600 ret = -EBADF;
7601 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007602 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007603
7604 /*
7605 * Don't allow io_uring instances to be registered. If UNIX
7606 * isn't enabled, then this causes a reference cycle and this
7607 * instance can never get freed. If UNIX is enabled we'll
7608 * handle it just fine, but there's still no point in allowing
7609 * a ring fd as it doesn't support regular read/write anyway.
7610 */
7611 if (file->f_op == &io_uring_fops) {
7612 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007613 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007614 }
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007615 *io_fixed_file_slot(file_data, i) = file;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007616 }
7617
Jens Axboe05f3fb32019-12-09 11:22:50 -07007618 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007619 if (ret) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07007620 io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007621 return ret;
7622 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007623
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007624 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007625 if (!ref_node) {
Xiaoguang Wang05589552020-03-31 14:05:18 +08007626 io_sqe_files_unregister(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007627 return -ENOMEM;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007628 }
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007629 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007630
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007631 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007632 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007633out_fput:
7634 for (i = 0; i < ctx->nr_user_files; i++) {
7635 file = io_file_from_index(ctx, i);
7636 if (file)
7637 fput(file);
7638 }
7639 for (i = 0; i < nr_tables; i++)
7640 kfree(file_data->table[i].files);
7641 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007642out_free:
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007643 free_fixed_rsrc_data(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007644 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007645 return ret;
7646}
7647
Jens Axboec3a31e62019-10-03 13:59:56 -06007648static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7649 int index)
7650{
7651#if defined(CONFIG_UNIX)
7652 struct sock *sock = ctx->ring_sock->sk;
7653 struct sk_buff_head *head = &sock->sk_receive_queue;
7654 struct sk_buff *skb;
7655
7656 /*
7657 * See if we can merge this file into an existing skb SCM_RIGHTS
7658 * file set. If there's no room, fall back to allocating a new skb
7659 * and filling it in.
7660 */
7661 spin_lock_irq(&head->lock);
7662 skb = skb_peek(head);
7663 if (skb) {
7664 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7665
7666 if (fpl->count < SCM_MAX_FD) {
7667 __skb_unlink(skb, head);
7668 spin_unlock_irq(&head->lock);
7669 fpl->fp[fpl->count] = get_file(file);
7670 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7671 fpl->count++;
7672 spin_lock_irq(&head->lock);
7673 __skb_queue_head(head, skb);
7674 } else {
7675 skb = NULL;
7676 }
7677 }
7678 spin_unlock_irq(&head->lock);
7679
7680 if (skb) {
7681 fput(file);
7682 return 0;
7683 }
7684
7685 return __io_sqe_files_scm(ctx, 1, index);
7686#else
7687 return 0;
7688#endif
7689}
7690
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007691static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007692{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007693 struct io_rsrc_put *prsrc;
7694 struct fixed_rsrc_ref_node *ref_node = data->node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007695
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007696 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7697 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007698 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007699
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007700 prsrc->rsrc = rsrc;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007701 list_add(&prsrc->list, &ref_node->rsrc_list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007702
Hillf Dantona5318d32020-03-23 17:47:15 +08007703 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007704}
7705
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007706static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7707 struct file *file)
7708{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007709 return io_queue_rsrc_removal(data, (void *)file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007710}
7711
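/*
 * Editor's note: IORING_REGISTER_FILES_UPDATE. For each slot, queue the
 * old file (if any) for a deferred put through the rsrc node, then
 * install the new one; the freshly allocated node is only switched in
 * if at least one slot actually changed.
 */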
Jens Axboe05f3fb32019-12-09 11:22:50 -07007712static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007713 struct io_uring_rsrc_update *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007714 unsigned nr_args)
7715{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007716 struct fixed_rsrc_data *data = ctx->file_data;
7717 struct fixed_rsrc_ref_node *ref_node;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007718 struct file *file, **file_slot;
Jens Axboec3a31e62019-10-03 13:59:56 -06007719 __s32 __user *fds;
7720 int fd, i, err;
7721 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007722 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007723
Jens Axboe05f3fb32019-12-09 11:22:50 -07007724 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007725 return -EOVERFLOW;
7726 if (done > ctx->nr_user_files)
7727 return -EINVAL;
7728
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007729 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007730 if (!ref_node)
7731 return -ENOMEM;
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007732 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007733
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007734 fds = u64_to_user_ptr(up->data);
Pavel Begunkov67973b92021-01-26 13:51:09 +00007735 for (done = 0; done < nr_args; done++) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007736 err = 0;
7737 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7738 err = -EFAULT;
7739 break;
7740 }
noah4e0377a2021-01-26 15:23:28 -05007741 if (fd == IORING_REGISTER_FILES_SKIP)
7742 continue;
7743
Pavel Begunkov67973b92021-01-26 13:51:09 +00007744 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007745 file_slot = io_fixed_file_slot(ctx->file_data, i);
7746
7747 if (*file_slot) {
7748 err = io_queue_file_removal(data, *file_slot);
Hillf Dantona5318d32020-03-23 17:47:15 +08007749 if (err)
7750 break;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007751 *file_slot = NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007752 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007753 }
7754 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007755 file = fget(fd);
7756 if (!file) {
7757 err = -EBADF;
7758 break;
7759 }
7760 /*
7761 * Don't allow io_uring instances to be registered. If
7762 * UNIX isn't enabled, then this causes a reference
7763 * cycle and this instance can never get freed. If UNIX
7764 * is enabled we'll handle it just fine, but there's
7765 * still no point in allowing a ring fd as it doesn't
7766 * support regular read/write anyway.
7767 */
7768 if (file->f_op == &io_uring_fops) {
7769 fput(file);
7770 err = -EBADF;
7771 break;
7772 }
Jens Axboee68a3ff2021-02-11 07:45:08 -07007773 *file_slot = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007774 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007775 if (err) {
Jens Axboee68a3ff2021-02-11 07:45:08 -07007776 *file_slot = NULL;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007777 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007778 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007779 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007780 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007781 }
7782
Xiaoguang Wang05589552020-03-31 14:05:18 +08007783 if (needs_switch) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007784 percpu_ref_kill(&data->node->refs);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007785 io_sqe_rsrc_set_node(ctx, data, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007786 } else
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007787 destroy_fixed_rsrc_ref_node(ref_node);
Jens Axboec3a31e62019-10-03 13:59:56 -06007788
7789 return done ? done : err;
7790}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007791
Jens Axboe05f3fb32019-12-09 11:22:50 -07007792static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7793 unsigned nr_args)
7794{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007795 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007796
7797 if (!ctx->file_data)
7798 return -ENXIO;
7799 if (!nr_args)
7800 return -EINVAL;
7801 if (copy_from_user(&up, arg, sizeof(up)))
7802 return -EFAULT;
7803 if (up.resv)
7804 return -EINVAL;
7805
7806 return __io_sqe_files_update(ctx, &up, nr_args);
7807}
Jens Axboec3a31e62019-10-03 13:59:56 -06007808
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007809static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007810{
7811 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7812
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007813 req = io_put_req_find_next(req);
7814 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07007815}
7816
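/*
 * Editor's note: create the per-task io-wq backing this ring. The hash
 * map used to serialize hashed work (e.g. buffered writes to the same
 * inode) is allocated lazily and cached on the ctx, so every task's
 * io-wq for this ring shares the same map.
 */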
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007817static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007818{
Jens Axboee9418942021-02-19 12:33:30 -07007819 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007820 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007821 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007822
Jens Axboee9418942021-02-19 12:33:30 -07007823 hash = ctx->hash_map;
7824 if (!hash) {
7825 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7826 if (!hash)
7827 return ERR_PTR(-ENOMEM);
7828 refcount_set(&hash->refs, 1);
7829 init_waitqueue_head(&hash->wait);
7830 ctx->hash_map = hash;
7831 }
7832
7833 data.hash = hash;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007834 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007835 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007836
Jens Axboed25e3a32021-02-16 11:41:41 -07007837	/* Use QD, or 4 * number of CPUs, whichever is smaller */
7838 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007839
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007840 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007841}
7842
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007843static int io_uring_alloc_task_context(struct task_struct *task,
7844 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007845{
7846 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007847 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007848
7849 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7850 if (unlikely(!tctx))
7851 return -ENOMEM;
7852
Jens Axboed8a6df12020-10-15 16:24:45 -06007853 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7854 if (unlikely(ret)) {
7855 kfree(tctx);
7856 return ret;
7857 }
7858
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007859 tctx->io_wq = io_init_wq_offload(ctx);
7860 if (IS_ERR(tctx->io_wq)) {
7861 ret = PTR_ERR(tctx->io_wq);
7862 percpu_counter_destroy(&tctx->inflight);
7863 kfree(tctx);
7864 return ret;
7865 }
7866
Jens Axboe0f212202020-09-13 13:09:39 -06007867 xa_init(&tctx->xa);
7868 init_waitqueue_head(&tctx->wait);
7869 tctx->last = NULL;
Jens Axboefdaf0832020-10-30 09:37:30 -06007870 atomic_set(&tctx->in_idle, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007871 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00007872 spin_lock_init(&tctx->task_lock);
7873 INIT_WQ_LIST(&tctx->task_list);
7874 tctx->task_state = 0;
7875 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06007876 return 0;
7877}
7878
7879void __io_uring_free(struct task_struct *tsk)
7880{
7881 struct io_uring_task *tctx = tsk->io_uring;
7882
7883 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007884 WARN_ON_ONCE(tctx->io_wq);
7885
Jens Axboed8a6df12020-10-15 16:24:45 -06007886 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007887 kfree(tctx);
7888 tsk->io_uring = NULL;
7889}
7890
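/*
 * Editor's note: set up SQPOLL offload if requested. Find or create the
 * io_sq_data (possibly attaching to an existing one), record idle time
 * and CPU affinity, then spawn the SQPOLL thread with create_io_thread()
 * and give it an io_uring task context.
 */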
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007891static int io_sq_offload_create(struct io_ring_ctx *ctx,
7892 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007893{
7894 int ret;
7895
Jens Axboed25e3a32021-02-16 11:41:41 -07007896 /* Retain compatibility with failing for an invalid attach attempt */
7897 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7898 IORING_SETUP_ATTACH_WQ) {
7899 struct fd f;
7900
7901 f = fdget(p->wq_fd);
7902 if (!f.file)
7903 return -ENXIO;
7904 if (f.file->f_op != &io_uring_fops) {
7905 fdput(f);
7906 return -EINVAL;
7907 }
7908 fdput(f);
7909 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007910 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07007911 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007912 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007913 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007914
Jens Axboe3ec482d2019-04-08 10:51:01 -06007915 ret = -EPERM;
Jens Axboece59fc62020-09-02 13:28:09 -06007916 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
Jens Axboe3ec482d2019-04-08 10:51:01 -06007917 goto err;
7918
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007919 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007920 if (IS_ERR(sqd)) {
7921 ret = PTR_ERR(sqd);
7922 goto err;
7923 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007924
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007925 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06007926 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06007927 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7928 if (!ctx->sq_thread_idle)
7929 ctx->sq_thread_idle = HZ;
7930
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007931 ret = 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007932 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007933 list_add(&ctx->sqd_list, &sqd->ctx_list);
7934 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007935 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007936 if (attached && !sqd->thread)
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007937 ret = -ENXIO;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007938 io_sq_thread_unpark(sqd);
7939
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007940 if (ret < 0)
7941 goto err;
7942 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007943 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06007944
Jens Axboe6c271ce2019-01-10 11:22:30 -07007945 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06007946 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007947
Jens Axboe917257d2019-04-13 09:28:55 -06007948 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06007949 if (cpu >= nr_cpu_ids)
Jens Axboee8f98f242021-03-09 16:32:13 -07007950 goto err_sqpoll;
Shenghui Wang7889f442019-05-07 16:03:19 +08007951 if (!cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07007952 goto err_sqpoll;
Jens Axboe917257d2019-04-13 09:28:55 -06007953
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007954 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007955 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007956 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007957 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007958
7959 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007960 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07007961 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
7962 if (IS_ERR(tsk)) {
7963 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07007964 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007965 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007966
Jens Axboe46fe18b2021-03-04 12:39:36 -07007967 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007968 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07007969 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06007970 if (ret)
7971 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007972 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7973 /* Can't have SQ_AFF without SQPOLL */
7974 ret = -EINVAL;
7975 goto err;
7976 }
7977
Jens Axboe2b188cc2019-01-07 10:46:33 -07007978 return 0;
7979err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007980 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007981 return ret;
Jens Axboee8f98f242021-03-09 16:32:13 -07007982err_sqpoll:
7983 complete(&ctx->sq_data->exited);
7984 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007985}
7986
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007987static inline void __io_unaccount_mem(struct user_struct *user,
7988 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007989{
7990 atomic_long_sub(nr_pages, &user->locked_vm);
7991}
7992
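/*
 * Editor's note: charge @nr_pages against the user's RLIMIT_MEMLOCK using
 * a lock-free cmpxchg loop; fails with -ENOMEM once the limit would be
 * exceeded.
 */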
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007993static inline int __io_account_mem(struct user_struct *user,
7994 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007995{
7996 unsigned long page_limit, cur_pages, new_pages;
7997
7998 /* Don't allow more pages than we can safely lock */
7999 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8000
8001 do {
8002 cur_pages = atomic_long_read(&user->locked_vm);
8003 new_pages = cur_pages + nr_pages;
8004 if (new_pages > page_limit)
8005 return -ENOMEM;
8006 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8007 new_pages) != cur_pages);
8008
8009 return 0;
8010}
8011
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008012static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008013{
Jens Axboe62e398b2021-02-21 16:19:37 -07008014 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008015 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008016
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008017 if (ctx->mm_account)
8018 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008019}
8020
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008021static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008022{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008023 int ret;
8024
Jens Axboe62e398b2021-02-21 16:19:37 -07008025 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008026 ret = __io_account_mem(ctx->user, nr_pages);
8027 if (ret)
8028 return ret;
8029 }
8030
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008031 if (ctx->mm_account)
8032 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008033
8034 return 0;
8035}
8036
Jens Axboe2b188cc2019-01-07 10:46:33 -07008037static void io_mem_free(void *ptr)
8038{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008039 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008040
Mark Rutland52e04ef2019-04-30 17:30:21 +01008041 if (!ptr)
8042 return;
8043
8044 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008045 if (put_page_testzero(page))
8046 free_compound_page(page);
8047}
8048
8049static void *io_mem_alloc(size_t size)
8050{
8051 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008052 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008053
8054 return (void *) __get_free_pages(gfp_flags, get_order(size));
8055}
8056
Hristo Venev75b28af2019-08-26 17:23:46 +00008057static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8058 size_t *sq_offset)
8059{
8060 struct io_rings *rings;
8061 size_t off, sq_array_size;
8062
8063 off = struct_size(rings, cqes, cq_entries);
8064 if (off == SIZE_MAX)
8065 return SIZE_MAX;
8066
8067#ifdef CONFIG_SMP
8068 off = ALIGN(off, SMP_CACHE_BYTES);
8069 if (off == 0)
8070 return SIZE_MAX;
8071#endif
8072
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008073 if (sq_offset)
8074 *sq_offset = off;
8075
Hristo Venev75b28af2019-08-26 17:23:46 +00008076 sq_array_size = array_size(sizeof(u32), sq_entries);
8077 if (sq_array_size == SIZE_MAX)
8078 return SIZE_MAX;
8079
8080 if (check_add_overflow(off, sq_array_size, &off))
8081 return SIZE_MAX;
8082
Hristo Venev75b28af2019-08-26 17:23:46 +00008083 return off;
8084}
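
/*
 * Editor's sketch: rings_size(8, 16, &off) yields
 * struct_size(rings, cqes, 16), aligned up to SMP_CACHE_BYTES on SMP
 * kernels, plus an 8-entry u32 SQ index array; *off reports where that
 * array begins.
 */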
8085
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008086static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008087{
8088 int i, j;
8089
8090 if (!ctx->user_bufs)
8091 return -ENXIO;
8092
8093 for (i = 0; i < ctx->nr_user_bufs; i++) {
8094 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8095
8096 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008097 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07008098
Jens Axboede293932020-09-17 16:19:16 -06008099 if (imu->acct_pages)
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008100 io_unaccount_mem(ctx, imu->acct_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008101 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008102 imu->nr_bvecs = 0;
8103 }
8104
8105 kfree(ctx->user_bufs);
8106 ctx->user_bufs = NULL;
8107 ctx->nr_user_bufs = 0;
8108 return 0;
8109}
8110
8111static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8112 void __user *arg, unsigned index)
8113{
8114 struct iovec __user *src;
8115
8116#ifdef CONFIG_COMPAT
8117 if (ctx->compat) {
8118 struct compat_iovec __user *ciovs;
8119 struct compat_iovec ciov;
8120
8121 ciovs = (struct compat_iovec __user *) arg;
8122 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8123 return -EFAULT;
8124
Jens Axboed55e5f52019-12-11 16:12:15 -07008125 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008126 dst->iov_len = ciov.iov_len;
8127 return 0;
8128 }
8129#endif
8130 src = (struct iovec __user *) arg;
8131 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8132 return -EFAULT;
8133 return 0;
8134}
8135
Jens Axboede293932020-09-17 16:19:16 -06008136/*
8137	 * Not super efficient, but this only happens at registration time. And we do cache
8138 * the last compound head, so generally we'll only do a full search if we don't
8139 * match that one.
8140 *
8141 * We check if the given compound head page has already been accounted, to
8142 * avoid double accounting it. This allows us to account the full size of the
8143 * page, not just the constituent pages of a huge page.
8144 */
8145static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8146 int nr_pages, struct page *hpage)
8147{
8148 int i, j;
8149
8150 /* check current page array */
8151 for (i = 0; i < nr_pages; i++) {
8152 if (!PageCompound(pages[i]))
8153 continue;
8154 if (compound_head(pages[i]) == hpage)
8155 return true;
8156 }
8157
8158 /* check previously registered pages */
8159 for (i = 0; i < ctx->nr_user_bufs; i++) {
8160 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8161
8162 for (j = 0; j < imu->nr_bvecs; j++) {
8163 if (!PageCompound(imu->bvec[j].bv_page))
8164 continue;
8165 if (compound_head(imu->bvec[j].bv_page) == hpage)
8166 return true;
8167 }
8168 }
8169
8170 return false;
8171}
8172
8173static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8174 int nr_pages, struct io_mapped_ubuf *imu,
8175 struct page **last_hpage)
8176{
8177 int i, ret;
8178
8179 for (i = 0; i < nr_pages; i++) {
8180 if (!PageCompound(pages[i])) {
8181 imu->acct_pages++;
8182 } else {
8183 struct page *hpage;
8184
8185 hpage = compound_head(pages[i]);
8186 if (hpage == *last_hpage)
8187 continue;
8188 *last_hpage = hpage;
8189 if (headpage_already_acct(ctx, pages, i, hpage))
8190 continue;
8191 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8192 }
8193 }
8194
8195 if (!imu->acct_pages)
8196 return 0;
8197
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008198 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008199 if (ret)
8200 imu->acct_pages = 0;
8201 return ret;
8202}
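
/*
 * Editor's note, assuming x86-64 with 2MB huge pages: one fully pinned
 * huge page is accounted as 512 pages exactly once, even though all 512
 * constituent pages show up in the pin_user_pages() result with the
 * same compound head.
 */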
8203
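/*
 * Editor's note: pin the user buffer described by @iov and record it in
 * @imu as a bio_vec array. File-backed mappings other than hugetlbfs are
 * rejected with -EOPNOTSUPP, and the pinned pages are charged through
 * io_buffer_account_pin() above.
 */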
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008204static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8205 struct io_mapped_ubuf *imu,
8206 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008207{
8208 struct vm_area_struct **vmas = NULL;
8209 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008210 unsigned long off, start, end, ubuf;
8211 size_t size;
8212 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008213
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008214 ubuf = (unsigned long) iov->iov_base;
8215 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8216 start = ubuf >> PAGE_SHIFT;
8217 nr_pages = end - start;
8218
8219 ret = -ENOMEM;
8220
8221 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8222 if (!pages)
8223 goto done;
8224
8225 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8226 GFP_KERNEL);
8227 if (!vmas)
8228 goto done;
8229
8230 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8231 GFP_KERNEL);
8232 if (!imu->bvec)
8233 goto done;
8234
8235 ret = 0;
8236 mmap_read_lock(current->mm);
8237 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8238 pages, vmas);
8239 if (pret == nr_pages) {
8240 /* don't support file backed memory */
8241 for (i = 0; i < nr_pages; i++) {
8242 struct vm_area_struct *vma = vmas[i];
8243
8244 if (vma->vm_file &&
8245 !is_file_hugepages(vma->vm_file)) {
8246 ret = -EOPNOTSUPP;
8247 break;
8248 }
8249 }
8250 } else {
8251 ret = pret < 0 ? pret : -EFAULT;
8252 }
8253 mmap_read_unlock(current->mm);
8254 if (ret) {
8255 /*
8256	 * if we did a partial map, or found file-backed vmas,
8257	 * release any pages we did get
8258 */
8259 if (pret > 0)
8260 unpin_user_pages(pages, pret);
8261 kvfree(imu->bvec);
8262 goto done;
8263 }
8264
8265 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8266 if (ret) {
8267 unpin_user_pages(pages, pret);
8268 kvfree(imu->bvec);
8269 goto done;
8270 }
8271
8272 off = ubuf & ~PAGE_MASK;
8273 size = iov->iov_len;
8274 for (i = 0; i < nr_pages; i++) {
8275 size_t vec_len;
8276
8277 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8278 imu->bvec[i].bv_page = pages[i];
8279 imu->bvec[i].bv_len = vec_len;
8280 imu->bvec[i].bv_offset = off;
8281 off = 0;
8282 size -= vec_len;
8283 }
8284 /* store original address for later verification */
8285 imu->ubuf = ubuf;
8286 imu->len = iov->iov_len;
8287 imu->nr_bvecs = nr_pages;
8288 ret = 0;
8289done:
8290 kvfree(pages);
8291 kvfree(vmas);
8292 return ret;
8293}
8294
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008295static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008296{
Jens Axboeedafcce2019-01-09 09:16:05 -07008297 if (ctx->user_bufs)
8298 return -EBUSY;
8299 if (!nr_args || nr_args > UIO_MAXIOV)
8300 return -EINVAL;
8301
8302 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8303 GFP_KERNEL);
8304 if (!ctx->user_bufs)
8305 return -ENOMEM;
8306
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008307 return 0;
8308}
8309
8310static int io_buffer_validate(struct iovec *iov)
8311{
8312 /*
8313	 * Don't impose further size or layout constraints on the
8314	 * buffer here; we'll -EINVAL later, when the IO is
8315	 * submitted, if they turn out to be wrong.
8316 */
8317 if (!iov->iov_base || !iov->iov_len)
8318 return -EFAULT;
8319
8320 /* arbitrary limit, but we need something */
8321 if (iov->iov_len > SZ_1G)
8322 return -EFAULT;
8323
8324 return 0;
8325}
8326
8327static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8328 unsigned int nr_args)
8329{
8330 int i, ret;
8331 struct iovec iov;
8332 struct page *last_hpage = NULL;
8333
8334 ret = io_buffers_map_alloc(ctx, nr_args);
8335 if (ret)
8336 return ret;
8337
Jens Axboeedafcce2019-01-09 09:16:05 -07008338 for (i = 0; i < nr_args; i++) {
8339 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
Jens Axboeedafcce2019-01-09 09:16:05 -07008340
8341 ret = io_copy_iov(ctx, &iov, arg, i);
8342 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008343 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008344
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008345 ret = io_buffer_validate(&iov);
8346 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008347 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008348
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008349 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8350 if (ret)
8351 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008352
8353 ctx->nr_user_bufs++;
8354 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008355
8356 if (ret)
8357 io_sqe_buffers_unregister(ctx);
8358
Jens Axboeedafcce2019-01-09 09:16:05 -07008359 return ret;
8360}
8361
Jens Axboe9b402842019-04-11 11:45:41 -06008362static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8363{
8364 __s32 __user *fds = arg;
8365 int fd;
8366
8367 if (ctx->cq_ev_fd)
8368 return -EBUSY;
8369
8370 if (copy_from_user(&fd, fds, sizeof(*fds)))
8371 return -EFAULT;
8372
8373 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8374 if (IS_ERR(ctx->cq_ev_fd)) {
8375 int ret = PTR_ERR(ctx->cq_ev_fd);
8376 ctx->cq_ev_fd = NULL;
8377 return ret;
8378 }
8379
8380 return 0;
8381}
8382
8383static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8384{
8385 if (ctx->cq_ev_fd) {
8386 eventfd_ctx_put(ctx->cq_ev_fd);
8387 ctx->cq_ev_fd = NULL;
8388 return 0;
8389 }
8390
8391 return -ENXIO;
8392}
8393
Jens Axboe5a2e7452020-02-23 16:23:11 -07008394static void io_destroy_buffers(struct io_ring_ctx *ctx)
8395{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008396 struct io_buffer *buf;
8397 unsigned long index;
8398
8399 xa_for_each(&ctx->io_buffers, index, buf)
8400 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008401}
8402
Jens Axboe68e68ee2021-02-13 09:00:02 -07008403static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008404{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008405 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008406
Jens Axboe68e68ee2021-02-13 09:00:02 -07008407 list_for_each_entry_safe(req, nxt, list, compl.list) {
8408 if (tsk && req->task != tsk)
8409 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008410 list_del(&req->compl.list);
8411 kmem_cache_free(req_cachep, req);
8412 }
8413}
8414
Jens Axboe4010fec2021-02-27 15:04:18 -07008415static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008416{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008417 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008418 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008419
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008420 mutex_lock(&ctx->uring_lock);
8421
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008422 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008423 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8424 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008425 submit_state->free_reqs = 0;
8426 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008427
8428 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008429 list_splice_init(&cs->locked_free_list, &cs->free_list);
8430 cs->locked_free_nr = 0;
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008431 spin_unlock_irq(&ctx->completion_lock);
8432
Pavel Begunkove5547d22021-02-23 22:17:20 +00008433 io_req_cache_free(&cs->free_list, NULL);
8434
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008435 mutex_unlock(&ctx->uring_lock);
8436}
8437
Jens Axboe2b188cc2019-01-07 10:46:33 -07008438static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8439{
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008440 /*
8441	 * Some tasks may still use the context even after all refs and requests
Pavel Begunkov180f8292021-03-14 20:57:09 +00008442	 * have been put, and may do so while holding uring_lock or
8443	 * completion_lock; see __io_req_task_submit(). Wait for them to finish.
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008444 */
8445 mutex_lock(&ctx->uring_lock);
8446 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov180f8292021-03-14 20:57:09 +00008447 spin_lock_irq(&ctx->completion_lock);
8448 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008449
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008450 io_sq_thread_finish(ctx);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008451 io_sqe_buffers_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008452
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008453 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008454 mmdrop(ctx->mm_account);
8455 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008456 }
Jens Axboedef596e2019-01-09 08:59:42 -07008457
Hao Xu8bad28d2021-02-19 17:19:36 +08008458 mutex_lock(&ctx->uring_lock);
Jens Axboe6b063142019-01-10 22:13:58 -07008459 io_sqe_files_unregister(ctx);
Hao Xu8bad28d2021-02-19 17:19:36 +08008460 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008461 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008462 io_destroy_buffers(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07008463
Jens Axboe2b188cc2019-01-07 10:46:33 -07008464#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008465 if (ctx->ring_sock) {
8466 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008467 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008468 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008469#endif
8470
Hristo Venev75b28af2019-08-26 17:23:46 +00008471 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008472 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008473
8474 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008475 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008476 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008477 if (ctx->hash_map)
8478 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008479 kfree(ctx->cancel_hash);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008480 kfree(ctx);
8481}
8482
8483static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8484{
8485 struct io_ring_ctx *ctx = file->private_data;
8486 __poll_t mask = 0;
8487
8488 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008489 /*
8490 * synchronizes with barrier from wq_has_sleeper call in
8491 * io_commit_cqring
8492 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008493 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008494 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008495 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008496
8497 /*
8498 * Don't flush cqring overflow list here, just do a simple check.
8499 * Otherwise there could possibly be an ABBA deadlock:
8500 * CPU0 CPU1
8501 * ---- ----
8502 * lock(&ctx->uring_lock);
8503 * lock(&ep->mtx);
8504 * lock(&ctx->uring_lock);
8505 * lock(&ep->mtx);
8506 *
8507 * Users may get EPOLLIN while seeing nothing in the cqring, which
8508 * pushes them to do the flush.
8509 */
8510 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008511 mask |= EPOLLIN | EPOLLRDNORM;
8512
8513 return mask;
8514}
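
/*
 * Userspace sketch, for reference only: the ring fd can be driven through
 * poll(2)/epoll(7). EPOLLOUT means the SQ ring has free space, EPOLLIN
 * means CQEs may be pending, possibly only on the overflow list as the
 * comment above explains. ring_fd is assumed to come from io_uring_setup():
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		reap CQEs from the CQ ring; if it looks empty, call
 *		io_uring_enter(IORING_ENTER_GETEVENTS) to flush overflow
 *	}
 */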
8515
8516static int io_uring_fasync(int fd, struct file *file, int on)
8517{
8518 struct io_ring_ctx *ctx = file->private_data;
8519
8520 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8521}
8522
Yejune Deng0bead8c2020-12-24 11:02:20 +08008523static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008524{
Jens Axboe4379bf82021-02-15 13:40:22 -07008525 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008526
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008527 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008528 if (creds) {
8529 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008530 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008531 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008532
8533 return -EINVAL;
8534}
8535
Pavel Begunkov9b465712021-03-15 14:23:07 +00008536static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008537{
Pavel Begunkov9b465712021-03-15 14:23:07 +00008538 return io_run_task_work_head(&ctx->exit_task_work);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008539}
8540
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008541struct io_tctx_exit {
8542 struct callback_head task_work;
8543 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008544 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008545};
8546
8547static void io_tctx_exit_cb(struct callback_head *cb)
8548{
8549 struct io_uring_task *tctx = current->io_uring;
8550 struct io_tctx_exit *work;
8551
8552 work = container_of(cb, struct io_tctx_exit, task_work);
8553 /*
8554 * When @in_idle, we're in cancellation and it's racy to remove the
8555 * node. It'll be removed by the end of cancellation, just ignore it.
8556 */
8557 if (!atomic_read(&tctx->in_idle))
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008558 io_uring_del_task_file((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008559 complete(&work->completion);
8560}
8561
Jens Axboe85faa7b2020-04-09 18:14:00 -06008562static void io_ring_exit_work(struct work_struct *work)
8563{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008564 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008565 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008566 struct io_tctx_exit exit;
8567 struct io_tctx_node *node;
8568 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008569
Pavel Begunkova185f1d2021-03-23 10:52:38 +00008570 /* prevent SQPOLL from submitting new requests */
8571 if (ctx->sq_data) {
8572 io_sq_thread_park(ctx->sq_data);
8573 list_del_init(&ctx->sqd_list);
8574 io_sqd_update_thread_idle(ctx->sq_data);
8575 io_sq_thread_unpark(ctx->sq_data);
8576 }
8577
Jens Axboe56952e92020-06-17 15:00:04 -06008578 /*
8579 * If we're doing polled IO and end up having requests being
8580 * submitted async (out-of-line), then completions can come in while
8581 * we're waiting for refs to drop. We need to reap these manually,
8582 * as nobody else will be looking for them.
8583 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008584 do {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008585 io_uring_try_cancel_requests(ctx, NULL, NULL);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008586
8587 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008588 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008589
8590 mutex_lock(&ctx->uring_lock);
8591 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008592 WARN_ON_ONCE(time_after(jiffies, timeout));
8593
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008594 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8595 ctx_node);
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008596 exit.ctx = ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008597 init_completion(&exit.completion);
8598 init_task_work(&exit.task_work, io_tctx_exit_cb);
8599 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8600 if (WARN_ON_ONCE(ret))
8601 continue;
8602 wake_up_process(node->task);
8603
8604 mutex_unlock(&ctx->uring_lock);
8605 wait_for_completion(&exit.completion);
8606 cond_resched();
8607 mutex_lock(&ctx->uring_lock);
8608 }
8609 mutex_unlock(&ctx->uring_lock);
8610
Jens Axboe85faa7b2020-04-09 18:14:00 -06008611 io_ring_ctx_free(ctx);
8612}
8613
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008614/* Returns true if we found and killed one or more timeouts */
8615static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
8616 struct files_struct *files)
8617{
8618 struct io_kiocb *req, *tmp;
8619 int canceled = 0;
8620
8621 spin_lock_irq(&ctx->completion_lock);
8622 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
8623 if (io_match_task(req, tsk, files)) {
8624 io_kill_timeout(req, -ECANCELED);
8625 canceled++;
8626 }
8627 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008628 if (canceled != 0)
8629 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008630 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008631 if (canceled != 0)
8632 io_cqring_ev_posted(ctx);
8633 return canceled != 0;
8634}
8635
Jens Axboe2b188cc2019-01-07 10:46:33 -07008636static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8637{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008638 unsigned long index;
8639	const struct cred *creds;
8640
Jens Axboe2b188cc2019-01-07 10:46:33 -07008641 mutex_lock(&ctx->uring_lock);
8642 percpu_ref_kill(&ctx->refs);
Pavel Begunkovcda286f2020-12-17 00:24:35 +00008643 /* if force is set, the ring is going away. always drop after that */
8644 ctx->cq_overflow_flushed = 1;
Pavel Begunkov634578f2020-12-06 22:22:44 +00008645 if (ctx->rings)
Pavel Begunkov6c503152021-01-04 20:36:36 +00008646 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008647 xa_for_each(&ctx->personalities, index, creds)
8648 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008649 mutex_unlock(&ctx->uring_lock);
8650
Pavel Begunkov6b819282020-11-06 13:00:25 +00008651 io_kill_timeouts(ctx, NULL, NULL);
8652 io_poll_remove_all(ctx, NULL, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008653
Jens Axboe15dff282019-11-13 09:09:23 -07008654 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008655 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008656
Jens Axboe85faa7b2020-04-09 18:14:00 -06008657 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008658 /*
8659 * Use system_unbound_wq to avoid spawning tons of event kworkers
8660 * if we're exiting a ton of rings at the same time. It just adds
8661 * noise and overhead, there's no discernible change in runtime
8662 * over using system_wq.
8663 */
8664 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008665}
8666
8667static int io_uring_release(struct inode *inode, struct file *file)
8668{
8669 struct io_ring_ctx *ctx = file->private_data;
8670
8671 file->private_data = NULL;
8672 io_ring_ctx_wait_and_kill(ctx);
8673 return 0;
8674}
8675
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008676struct io_task_cancel {
8677 struct task_struct *task;
8678 struct files_struct *files;
8679};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008680
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008681static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008682{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008683 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008684 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008685 bool ret;
8686
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008687 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008688 unsigned long flags;
8689 struct io_ring_ctx *ctx = req->ctx;
8690
8691 /* protect against races with linked timeouts */
8692 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008693 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008694 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8695 } else {
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008696 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008697 }
8698 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008699}
8700
Pavel Begunkove1915f72021-03-11 23:29:35 +00008701static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008702 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008703 struct files_struct *files)
8704{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008705 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008706 LIST_HEAD(list);
8707
8708 spin_lock_irq(&ctx->completion_lock);
8709 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00008710 if (io_match_task(de->req, task, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008711 list_cut_position(&list, &ctx->defer_list, &de->list);
8712 break;
8713 }
8714 }
8715 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008716 if (list_empty(&list))
8717 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008718
8719 while (!list_empty(&list)) {
8720 de = list_first_entry(&list, struct io_defer_entry, list);
8721 list_del_init(&de->list);
8722 req_set_fail_links(de->req);
8723 io_put_req(de->req);
8724 io_req_complete(de->req, -ECANCELED);
8725 kfree(de);
8726 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008727 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008728}
8729
Pavel Begunkov1b007642021-03-06 11:02:17 +00008730static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8731{
8732 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8733
8734 return req->ctx == data;
8735}
8736
8737static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8738{
8739 struct io_tctx_node *node;
8740 enum io_wq_cancel cret;
8741 bool ret = false;
8742
8743 mutex_lock(&ctx->uring_lock);
8744 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8745 struct io_uring_task *tctx = node->task->io_uring;
8746
8747 /*
8748 * io_wq will stay alive while we hold uring_lock, because it's
8749 * killed after ctx nodes, which requires to take the lock.
8750 */
8751 if (!tctx || !tctx->io_wq)
8752 continue;
8753 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8754 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8755 }
8756 mutex_unlock(&ctx->uring_lock);
8757
8758 return ret;
8759}
8760
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008761static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8762 struct task_struct *task,
8763 struct files_struct *files)
8764{
8765 struct io_task_cancel cancel = { .task = task, .files = files, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008766 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008767
8768 while (1) {
8769 enum io_wq_cancel cret;
8770 bool ret = false;
8771
Pavel Begunkov1b007642021-03-06 11:02:17 +00008772 if (!task) {
8773 ret |= io_uring_try_cancel_iowq(ctx);
8774 } else if (tctx && tctx->io_wq) {
8775 /*
8776 * Cancels requests of all rings, not only @ctx, but
8777 * it's fine as the task is in exit/exec.
8778 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008779 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008780 &cancel, true);
8781 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8782 }
8783
8784 /* SQPOLL thread does its own polling */
Jens Axboed052d1d2021-03-11 10:49:20 -07008785 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
8786 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008787 while (!list_empty_careful(&ctx->iopoll_list)) {
8788 io_iopoll_try_reap_events(ctx);
8789 ret = true;
8790 }
8791 }
8792
Pavel Begunkove1915f72021-03-11 23:29:35 +00008793 ret |= io_cancel_defer_files(ctx, task, files);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008794 ret |= io_poll_remove_all(ctx, task, files);
8795 ret |= io_kill_timeouts(ctx, task, files);
8796 ret |= io_run_task_work();
Pavel Begunkovba50a032021-02-26 15:47:56 +00008797 ret |= io_run_ctx_fallback(ctx);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008798 io_cqring_overflow_flush(ctx, true, task, files);
8799 if (!ret)
8800 break;
8801 cond_resched();
8802 }
8803}
8804
Pavel Begunkovca70f002021-01-26 15:28:27 +00008805static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8806 struct task_struct *task,
8807 struct files_struct *files)
8808{
8809 struct io_kiocb *req;
8810 int cnt = 0;
8811
8812 spin_lock_irq(&ctx->inflight_lock);
8813 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8814 cnt += io_match_task(req, task, files);
8815 spin_unlock_irq(&ctx->inflight_lock);
8816 return cnt;
8817}
8818
Pavel Begunkovb52fda02020-11-06 13:00:24 +00008819static void io_uring_cancel_files(struct io_ring_ctx *ctx,
Pavel Begunkovdf9923f2020-11-06 13:00:23 +00008820 struct task_struct *task,
Jens Axboefcb323c2019-10-24 12:39:47 -06008821 struct files_struct *files)
8822{
Jens Axboefcb323c2019-10-24 12:39:47 -06008823 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008824 DEFINE_WAIT(wait);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008825 int inflight;
Jens Axboefcb323c2019-10-24 12:39:47 -06008826
Pavel Begunkovca70f002021-01-26 15:28:27 +00008827 inflight = io_uring_count_inflight(ctx, task, files);
8828 if (!inflight)
Jens Axboefcb323c2019-10-24 12:39:47 -06008829 break;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008830
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008831 io_uring_try_cancel_requests(ctx, task, files);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008832
8833 prepare_to_wait(&task->io_uring->wait, &wait,
8834 TASK_UNINTERRUPTIBLE);
8835 if (inflight == io_uring_count_inflight(ctx, task, files))
8836 schedule();
Pavel Begunkovc98de082020-11-15 12:56:32 +00008837 finish_wait(&task->io_uring->wait, &wait);
Jens Axboe0f212202020-09-13 13:09:39 -06008838 }
Jens Axboe0f212202020-09-13 13:09:39 -06008839}
8840
8841/*
Jens Axboe0f212202020-09-13 13:09:39 -06008842 * Note that this task has used io_uring. We use it for cancelation purposes.
8843 */
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008844static int io_uring_add_task_file(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008845{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008846 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008847 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00008848 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008849
8850 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008851 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008852 if (unlikely(ret))
8853 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008854 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008855 }
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008856 if (tctx->last != ctx) {
8857 void *old = xa_load(&tctx->xa, (unsigned long)ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008858
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008859 if (!old) {
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008860 node = kmalloc(sizeof(*node), GFP_KERNEL);
8861 if (!node)
8862 return -ENOMEM;
8863 node->ctx = ctx;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008864 node->task = current;
8865
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008866 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008867 node, GFP_KERNEL));
Pavel Begunkova528b042020-12-21 18:34:04 +00008868 if (ret) {
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008869 kfree(node);
Pavel Begunkova528b042020-12-21 18:34:04 +00008870 return ret;
8871 }
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008872
8873 mutex_lock(&ctx->uring_lock);
8874 list_add(&node->ctx_node, &ctx->tctx_list);
8875 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06008876 }
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008877 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06008878 }
Jens Axboe0f212202020-09-13 13:09:39 -06008879 return 0;
8880}
8881
8882/*
8883 * Remove this io_uring_file -> task mapping.
8884 */
Pavel Begunkov29412672021-03-06 11:02:11 +00008885static void io_uring_del_task_file(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06008886{
8887 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008888 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00008889
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00008890 if (!tctx)
8891 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008892 node = xa_erase(&tctx->xa, index);
8893 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008894 return;
Jens Axboe0f212202020-09-13 13:09:39 -06008895
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008896 WARN_ON_ONCE(current != node->task);
8897 WARN_ON_ONCE(list_empty(&node->ctx_node));
8898
8899 mutex_lock(&node->ctx->uring_lock);
8900 list_del(&node->ctx_node);
8901 mutex_unlock(&node->ctx->uring_lock);
8902
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008903 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008904 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008905 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06008906}
8907
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008908static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008909{
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008910 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008911 unsigned long index;
8912
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008913 xa_for_each(&tctx->xa, index, node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008914 io_uring_del_task_file(index);
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008915 if (tctx->io_wq) {
8916 io_wq_put_and_exit(tctx->io_wq);
8917 tctx->io_wq = NULL;
8918 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008919}
8920
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008921static s64 tctx_inflight(struct io_uring_task *tctx)
8922{
8923 return percpu_counter_sum(&tctx->inflight);
8924}
8925
8926static void io_sqpoll_cancel_cb(struct callback_head *cb)
8927{
8928 struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
8929 struct io_ring_ctx *ctx = work->ctx;
8930 struct io_sq_data *sqd = ctx->sq_data;
8931
8932 if (sqd->thread)
8933 io_uring_cancel_sqpoll(ctx);
8934 complete(&work->completion);
8935}
8936
8937static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
8938{
8939 struct io_sq_data *sqd = ctx->sq_data;
8940 struct io_tctx_exit work = { .ctx = ctx, };
8941 struct task_struct *task;
8942
8943 io_sq_thread_park(sqd);
8944 list_del_init(&ctx->sqd_list);
8945 io_sqd_update_thread_idle(sqd);
8946 task = sqd->thread;
8947 if (task) {
8948 init_completion(&work.completion);
8949 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00008950 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008951 wake_up_process(task);
8952 }
8953 io_sq_thread_unpark(sqd);
8954
8955 if (task)
8956 wait_for_completion(&work.completion);
8957}
8958
Jens Axboe0f212202020-09-13 13:09:39 -06008959void __io_uring_files_cancel(struct files_struct *files)
8960{
8961 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008962 struct io_tctx_node *node;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008963 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06008964
8965 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008966 atomic_inc(&tctx->in_idle);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008967 xa_for_each(&tctx->xa, index, node) {
8968 struct io_ring_ctx *ctx = node->ctx;
8969
8970 if (ctx->sq_data) {
8971 io_sqpoll_cancel_sync(ctx);
8972 continue;
8973 }
8974 io_uring_cancel_files(ctx, current, files);
8975 if (!files)
8976 io_uring_try_cancel_requests(ctx, current, NULL);
8977 }
Jens Axboefdaf0832020-10-30 09:37:30 -06008978 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008979
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008980 if (files)
8981 io_uring_clean_tctx(tctx);
Jens Axboefdaf0832020-10-30 09:37:30 -06008982}
8983
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008984/* should only be called by SQPOLL task */
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008985static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
8986{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008987 struct io_sq_data *sqd = ctx->sq_data;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008988 struct io_uring_task *tctx = current->io_uring;
Jens Axboefdaf0832020-10-30 09:37:30 -06008989 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008990 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008991
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008992 WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
8993
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008994 atomic_inc(&tctx->in_idle);
8995 do {
8996 /* read completions before cancelations */
8997 inflight = tctx_inflight(tctx);
8998 if (!inflight)
8999 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009000 io_uring_try_cancel_requests(ctx, current, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06009001
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009002 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9003 /*
9004 * If we've seen completions, retry without waiting. This
9005 * avoids a race where a completion comes in before we did
9006 * prepare_to_wait().
9007 */
9008 if (inflight == tctx_inflight(tctx))
9009 schedule();
9010 finish_wait(&tctx->wait, &wait);
9011 } while (1);
9012 atomic_dec(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06009013}
9014
Jens Axboe0f212202020-09-13 13:09:39 -06009015/*
9016 * Find any io_uring fd that this task has registered or done IO on, and cancel
9017 * requests.
9018 */
9019void __io_uring_task_cancel(void)
9020{
9021 struct io_uring_task *tctx = current->io_uring;
9022 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009023 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06009024
9025 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06009026 atomic_inc(&tctx->in_idle);
Pavel Begunkov5a978dc2021-03-27 09:59:30 +00009027 __io_uring_files_cancel(NULL);
9028
Jens Axboed8a6df12020-10-15 16:24:45 -06009029 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009030 /* read completions before cancelations */
Jens Axboefdaf0832020-10-30 09:37:30 -06009031 inflight = tctx_inflight(tctx);
Jens Axboed8a6df12020-10-15 16:24:45 -06009032 if (!inflight)
9033 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009034 __io_uring_files_cancel(NULL);
9035
9036 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9037
9038 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009039 * If we've seen completions, retry without waiting. This
9040 * avoids a race where a completion comes in before we did
9041 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009042 */
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009043 if (inflight == tctx_inflight(tctx))
9044 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009045 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009046 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06009047
Jens Axboefdaf0832020-10-30 09:37:30 -06009048 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009049
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009050 io_uring_clean_tctx(tctx);
9051 /* all current's requests should be gone, we can kill tctx */
9052 __io_uring_free(current);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009053}
9054
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009055static void *io_uring_validate_mmap_request(struct file *file,
9056 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009057{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009058 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009059 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009060 struct page *page;
9061 void *ptr;
9062
9063 switch (offset) {
9064 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009065 case IORING_OFF_CQ_RING:
9066 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009067 break;
9068 case IORING_OFF_SQES:
9069 ptr = ctx->sq_sqes;
9070 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009071 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009072 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009073 }
9074
9075 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009076 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009077 return ERR_PTR(-EINVAL);
9078
9079 return ptr;
9080}
9081
9082#ifdef CONFIG_MMU
9083
9084static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9085{
9086 size_t sz = vma->vm_end - vma->vm_start;
9087 unsigned long pfn;
9088 void *ptr;
9089
9090 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9091 if (IS_ERR(ptr))
9092 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009093
9094 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9095 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9096}
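
/*
 * Userspace sketch, for reference only, of the mappings validated above.
 * Sizes are derived from the io_uring_params (p) that io_uring_setup()
 * filled in; error handling is elided:
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  ring_fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP the CQ ring shares the SQ ring mapping;
 * otherwise it is mapped separately at IORING_OFF_CQ_RING with size
 * p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe).
 */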
9097
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009098#else /* !CONFIG_MMU */
9099
9100static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9101{
9102 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9103}
9104
9105static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9106{
9107 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9108}
9109
9110static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9111 unsigned long addr, unsigned long len,
9112 unsigned long pgoff, unsigned long flags)
9113{
9114 void *ptr;
9115
9116 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9117 if (IS_ERR(ptr))
9118 return PTR_ERR(ptr);
9119
9120 return (unsigned long) ptr;
9121}
9122
9123#endif /* !CONFIG_MMU */
9124
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009125static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009126{
9127 DEFINE_WAIT(wait);
9128
9129 do {
9130 if (!io_sqring_full(ctx))
9131 break;
Jens Axboe90554202020-09-03 12:12:41 -06009132 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9133
9134 if (!io_sqring_full(ctx))
9135 break;
Jens Axboe90554202020-09-03 12:12:41 -06009136 schedule();
9137 } while (!signal_pending(current));
9138
9139 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009140 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009141}
9142
Hao Xuc73ebb62020-11-03 10:54:37 +08009143static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9144 struct __kernel_timespec __user **ts,
9145 const sigset_t __user **sig)
9146{
9147 struct io_uring_getevents_arg arg;
9148
9149 /*
9150 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9151 * is just a pointer to the sigset_t.
9152 */
9153 if (!(flags & IORING_ENTER_EXT_ARG)) {
9154 *sig = (const sigset_t __user *) argp;
9155 *ts = NULL;
9156 return 0;
9157 }
9158
9159 /*
9160 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9161 * timespec and sigset_t pointers if good.
9162 */
9163 if (*argsz != sizeof(arg))
9164 return -EINVAL;
9165 if (copy_from_user(&arg, argp, sizeof(arg)))
9166 return -EFAULT;
9167 *sig = u64_to_user_ptr(arg.sigmask);
9168 *argsz = arg.sigmask_sz;
9169 *ts = u64_to_user_ptr(arg.ts);
9170 return 0;
9171}
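
/*
 * Userspace sketch, for reference only: with IORING_ENTER_EXT_ARG the last
 * two io_uring_enter() arguments carry this struct rather than a bare
 * sigset_t, which is how a wait timeout is passed. mask (a sigset_t) and
 * ts (a struct __kernel_timespec) are assumed to be set up by the caller;
 * either pointer may be left 0 to skip it:
 *
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(uintptr_t)&mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(uintptr_t)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */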
9172
Jens Axboe2b188cc2019-01-07 10:46:33 -07009173SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009174 u32, min_complete, u32, flags, const void __user *, argp,
9175 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009176{
9177 struct io_ring_ctx *ctx;
9178 long ret = -EBADF;
9179 int submitted = 0;
9180 struct fd f;
9181
Jens Axboe4c6e2772020-07-01 11:29:10 -06009182 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009183
Jens Axboe90554202020-09-03 12:12:41 -06009184 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
Hao Xuc73ebb62020-11-03 10:54:37 +08009185 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009186 return -EINVAL;
9187
9188 f = fdget(fd);
9189 if (!f.file)
9190 return -EBADF;
9191
9192 ret = -EOPNOTSUPP;
9193 if (f.file->f_op != &io_uring_fops)
9194 goto out_fput;
9195
9196 ret = -ENXIO;
9197 ctx = f.file->private_data;
9198 if (!percpu_ref_tryget(&ctx->refs))
9199 goto out_fput;
9200
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009201 ret = -EBADFD;
9202 if (ctx->flags & IORING_SETUP_R_DISABLED)
9203 goto out;
9204
Jens Axboe6c271ce2019-01-10 11:22:30 -07009205 /*
9206 * For SQ polling, the thread will do all submissions and completions.
9207 * Just return the requested submit count, and wake the thread if
9208 * we were asked to.
9209 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009210 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009211 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00009212 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009213
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009214 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009215		if (unlikely(ctx->sq_data->thread == NULL))
9216			goto out;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009218 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009219 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009220 if (flags & IORING_ENTER_SQ_WAIT) {
9221 ret = io_sqpoll_wait_sq(ctx);
9222 if (ret)
9223 goto out;
9224 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009225 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009226 } else if (to_submit) {
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009227 ret = io_uring_add_task_file(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009228 if (unlikely(ret))
9229 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009230 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009231 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009232 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009233
9234 if (submitted != to_submit)
9235 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009236 }
9237 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009238 const sigset_t __user *sig;
9239 struct __kernel_timespec __user *ts;
9240
9241 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9242 if (unlikely(ret))
9243 goto out;
9244
Jens Axboe2b188cc2019-01-07 10:46:33 -07009245 min_complete = min(min_complete, ctx->cq_entries);
9246
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009247 /*
9248 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9249 * space applications don't need to poll for completion events
9250 * themselves; they can rely on io_sq_thread to do the polling
9251 * work, which reduces cpu usage and uring_lock contention.
9252 */
9253 if (ctx->flags & IORING_SETUP_IOPOLL &&
9254 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009255 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009256 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009257 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009258 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009259 }
9260
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009261out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009262 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009263out_fput:
9264 fdput(f);
9265 return submitted ? submitted : ret;
9266}
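
/*
 * Userspace sketch, for reference only, of the SQPOLL interplay above:
 * with IORING_SETUP_SQPOLL the application normally submits without
 * entering the kernel, but must wake the poll thread once it has gone
 * idle. sq_flags is assumed to point p.sq_off.flags bytes into the SQ
 * ring mapping:
 *
 *	unsigned int flags = 0;
 *
 *	if (*(volatile unsigned int *)sq_flags & IORING_SQ_NEED_WAKEUP)
 *		flags |= IORING_ENTER_SQ_WAKEUP;
 *	if (flags)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			flags, NULL, 0);
 *
 * IORING_ENTER_SQ_WAIT can be OR'ed in to block until SQ ring space is
 * available, see io_sqpoll_wait_sq() above.
 */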
9267
Tobias Klauserbebdb652020-02-26 18:38:32 +01009268#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009269static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9270 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009271{
Jens Axboe87ce9552020-01-30 08:25:34 -07009272 struct user_namespace *uns = seq_user_ns(m);
9273 struct group_info *gi;
9274 kernel_cap_t cap;
9275 unsigned __capi;
9276 int g;
9277
9278 seq_printf(m, "%5d\n", id);
9279 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9280 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9281 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9282 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9283 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9284 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9285 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9286 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9287 seq_puts(m, "\n\tGroups:\t");
9288 gi = cred->group_info;
9289 for (g = 0; g < gi->ngroups; g++) {
9290 seq_put_decimal_ull(m, g ? " " : "",
9291 from_kgid_munged(uns, gi->gid[g]));
9292 }
9293 seq_puts(m, "\n\tCapEff:\t");
9294 cap = cred->cap_effective;
9295 CAP_FOR_EACH_U32(__capi)
9296 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9297 seq_putc(m, '\n');
9298 return 0;
9299}
9300
9301static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9302{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009303 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009304 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009305 int i;
9306
Jens Axboefad8e0d2020-09-28 08:57:48 -06009307 /*
9308 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9309 * since the fdinfo case grabs it in the opposite direction of normal use
9310 * cases. If we fail to get the lock, we just don't iterate any
9311 * structures that could be going away outside the io_uring mutex.
9312 */
9313 has_lock = mutex_trylock(&ctx->uring_lock);
9314
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009315 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009316 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009317 if (!sq->thread)
9318 sq = NULL;
9319 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009320
9321 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9322 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009323 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009324 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Pavel Begunkovea64ec022021-02-04 13:52:07 +00009325 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009326
Jens Axboe87ce9552020-01-30 08:25:34 -07009327 if (f)
9328 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9329 else
9330 seq_printf(m, "%5u: <none>\n", i);
9331 }
9332 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009333 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009334 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9335
9336 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9337 (unsigned int) buf->len);
9338 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009339 if (has_lock && !xa_empty(&ctx->personalities)) {
9340 unsigned long index;
9341 const struct cred *cred;
9342
Jens Axboe87ce9552020-01-30 08:25:34 -07009343 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009344 xa_for_each(&ctx->personalities, index, cred)
9345 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009346 }
Jens Axboed7718a92020-02-14 22:23:12 -07009347 seq_printf(m, "PollList:\n");
9348 spin_lock_irq(&ctx->completion_lock);
9349 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9350 struct hlist_head *list = &ctx->cancel_hash[i];
9351 struct io_kiocb *req;
9352
9353 hlist_for_each_entry(req, list, hash_node)
9354 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9355 req->task->task_works != NULL);
9356 }
9357 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009358 if (has_lock)
9359 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009360}
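
/*
 * Illustrative /proc/<pid>/fdinfo/<ring fd> output produced by the helpers
 * above, trimmed to the ring-specific lines (all values are made up):
 *
 *	SqThread:	1234
 *	SqThreadCpu:	0
 *	UserFiles:	2
 *	    0: null
 *	    1: <none>
 *	UserBufs:	1
 *	    0: 0x7f1bd8a00000/4096
 *	Personalities:
 *	 4321
 *		Uid:	1000	1000	1000	1000
 *		...
 *	PollList:
 *	 op=6, task_works=0
 */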
9361
9362static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9363{
9364 struct io_ring_ctx *ctx = f->private_data;
9365
9366 if (percpu_ref_tryget(&ctx->refs)) {
9367 __io_uring_show_fdinfo(ctx, m);
9368 percpu_ref_put(&ctx->refs);
9369 }
9370}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009371#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009372
Jens Axboe2b188cc2019-01-07 10:46:33 -07009373static const struct file_operations io_uring_fops = {
9374 .release = io_uring_release,
9375 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009376#ifndef CONFIG_MMU
9377 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9378 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9379#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009380 .poll = io_uring_poll,
9381 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009382#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009383 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009384#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009385};
9386
9387static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9388 struct io_uring_params *p)
9389{
Hristo Venev75b28af2019-08-26 17:23:46 +00009390 struct io_rings *rings;
9391 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009392
Jens Axboebd740482020-08-05 12:58:23 -06009393 /* make sure these are sane, as we already accounted them */
9394 ctx->sq_entries = p->sq_entries;
9395 ctx->cq_entries = p->cq_entries;
9396
Hristo Venev75b28af2019-08-26 17:23:46 +00009397 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9398 if (size == SIZE_MAX)
9399 return -EOVERFLOW;
9400
9401 rings = io_mem_alloc(size);
9402 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009403 return -ENOMEM;
9404
Hristo Venev75b28af2019-08-26 17:23:46 +00009405 ctx->rings = rings;
9406 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9407 rings->sq_ring_mask = p->sq_entries - 1;
9408 rings->cq_ring_mask = p->cq_entries - 1;
9409 rings->sq_ring_entries = p->sq_entries;
9410 rings->cq_ring_entries = p->cq_entries;
9411 ctx->sq_mask = rings->sq_ring_mask;
9412 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009413
9414 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009415 if (size == SIZE_MAX) {
9416 io_mem_free(ctx->rings);
9417 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009418 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009419 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009420
9421 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009422 if (!ctx->sq_sqes) {
9423 io_mem_free(ctx->rings);
9424 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009425 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009426 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009427
Jens Axboe2b188cc2019-01-07 10:46:33 -07009428 return 0;
9429}
9430
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009431static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9432{
9433 int ret, fd;
9434
9435 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9436 if (fd < 0)
9437 return fd;
9438
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009439 ret = io_uring_add_task_file(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009440 if (ret) {
9441 put_unused_fd(fd);
9442 return ret;
9443 }
9444 fd_install(fd, file);
9445 return fd;
9446}
9447
Jens Axboe2b188cc2019-01-07 10:46:33 -07009448/*
9449 * Allocate an anonymous fd, this is what constitutes the application
9450 * visible backing of an io_uring instance. The application mmaps this
9451 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9452 * we have to tie this fd to a socket for file garbage collection purposes.
9453 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009454static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009455{
9456 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009457#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009458 int ret;
9459
Jens Axboe2b188cc2019-01-07 10:46:33 -07009460 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9461 &ctx->ring_sock);
9462 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009463 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009464#endif
9465
Jens Axboe2b188cc2019-01-07 10:46:33 -07009466 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9467 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009468#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009469 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009470 sock_release(ctx->ring_sock);
9471 ctx->ring_sock = NULL;
9472 } else {
9473 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009474 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009475#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009476 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009477}
9478
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009479static int io_uring_create(unsigned entries, struct io_uring_params *p,
9480 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009481{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009482 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009483 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009484 int ret;
9485
Jens Axboe8110c1a2019-12-28 15:39:54 -07009486 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009487 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009488 if (entries > IORING_MAX_ENTRIES) {
9489 if (!(p->flags & IORING_SETUP_CLAMP))
9490 return -EINVAL;
9491 entries = IORING_MAX_ENTRIES;
9492 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009493
9494 /*
9495 * Use twice as many entries for the CQ ring. It's possible for the
9496 * application to drive a higher depth than the size of the SQ ring,
9497 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009498 * some flexibility in overcommitting a bit. If the application has
9499 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9500 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009501 */
9502 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009503 if (p->flags & IORING_SETUP_CQSIZE) {
9504 /*
9505 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9506 * to a power-of-two, if it isn't already. We do NOT impose
9507 * any cq vs sq ring sizing.
9508 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009509 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009510 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009511 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9512 if (!(p->flags & IORING_SETUP_CLAMP))
9513 return -EINVAL;
9514 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9515 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009516 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9517 if (p->cq_entries < p->sq_entries)
9518 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009519 } else {
9520 p->cq_entries = 2 * p->sq_entries;
9521 }
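	/*
	 * Worked example: entries == 100 rounds up to sq_entries == 128;
	 * without IORING_SETUP_CQSIZE that yields cq_entries == 256, while
	 * IORING_SETUP_CQSIZE with cq_entries == 200 also rounds up to 256.
	 * A requested cq_entries of 64 would round to 64 and then fail the
	 * cq_entries < sq_entries check above.
	 */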
Jens Axboe2b188cc2019-01-07 10:46:33 -07009522
Jens Axboe2b188cc2019-01-07 10:46:33 -07009523 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009524 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009525 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009526 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009527 if (!capable(CAP_IPC_LOCK))
9528 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009529
9530 /*
9531 * This is just grabbed for accounting purposes. When a process exits,
9532 * the mm is exited and dropped before the files, hence we need to hang
9533 * on to this mm purely for the purposes of being able to unaccount
9534 * memory (locked/pinned vm). It's not used for anything else.
9535 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009536 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009537 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009538
Jens Axboe2b188cc2019-01-07 10:46:33 -07009539 ret = io_allocate_scq_urings(ctx, p);
9540 if (ret)
9541 goto err;
9542
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009543 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009544 if (ret)
9545 goto err;
9546
Jens Axboe2b188cc2019-01-07 10:46:33 -07009547 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009548 p->sq_off.head = offsetof(struct io_rings, sq.head);
9549 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9550 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9551 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9552 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9553 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9554 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009555
9556 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009557 p->cq_off.head = offsetof(struct io_rings, cq.head);
9558 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9559 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9560 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9561 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9562 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009563 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009564
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009565 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9566 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009567 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009568 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Jens Axboe1c0aa1f2021-02-20 11:55:28 -07009569 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009570
9571 if (copy_to_user(params, p, sizeof(*p))) {
9572 ret = -EFAULT;
9573 goto err;
9574 }
Jens Axboed1719f72020-07-30 13:43:53 -06009575
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009576 file = io_uring_get_file(ctx);
9577 if (IS_ERR(file)) {
9578 ret = PTR_ERR(file);
9579 goto err;
9580 }
9581
Jens Axboed1719f72020-07-30 13:43:53 -06009582 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009583 * Install ring fd as the very last thing, so we don't risk someone
9584 * having closed it before we finish setup
9585 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009586 ret = io_uring_install_fd(ctx, file);
9587 if (ret < 0) {
9588 /* fput will clean it up */
9589 fput(file);
9590 return ret;
9591 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009592
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009593 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009594 return ret;
9595err:
9596 io_ring_ctx_wait_and_kill(ctx);
9597 return ret;
9598}
9599
9600/*
9601 * Sets up an io_uring context and returns the fd. The application asks for a
9602 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9603 * params structure passed in.
9604 */
9605static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9606{
9607 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009608 int i;
9609
9610 if (copy_from_user(&p, params, sizeof(p)))
9611 return -EFAULT;
9612 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9613 if (p.resv[i])
9614 return -EINVAL;
9615 }
9616
Jens Axboe6c271ce2019-01-10 11:22:30 -07009617 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009618 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009619 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9620 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009621 return -EINVAL;
9622
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009623 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009624}
9625
9626SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9627 struct io_uring_params __user *, params)
9628{
9629 return io_uring_setup(entries, params);
9630}
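
/*
 * Userspace sketch, for reference only, assuming <sys/syscall.h> exposes
 * __NR_io_uring_setup and error handling is elided:
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 * On return, p.sq_entries/p.cq_entries hold the rounded-up ring sizes,
 * p.sq_off and p.cq_off hold the offsets used for the mmap() calls shown
 * near io_uring_mmap() above, and p.features advertises bits such as
 * IORING_FEAT_SINGLE_MMAP.
 */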
9631
Jens Axboe66f4af92020-01-16 15:36:52 -07009632static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9633{
9634 struct io_uring_probe *p;
9635 size_t size;
9636 int i, ret;
9637
9638 size = struct_size(p, ops, nr_args);
9639 if (size == SIZE_MAX)
9640 return -EOVERFLOW;
9641 p = kzalloc(size, GFP_KERNEL);
9642 if (!p)
9643 return -ENOMEM;
9644
9645 ret = -EFAULT;
9646 if (copy_from_user(p, arg, size))
9647 goto out;
9648 ret = -EINVAL;
9649 if (memchr_inv(p, 0, size))
9650 goto out;
9651
9652 p->last_op = IORING_OP_LAST - 1;
9653 if (nr_args > IORING_OP_LAST)
9654 nr_args = IORING_OP_LAST;
9655
9656 for (i = 0; i < nr_args; i++) {
9657 p->ops[i].op = i;
9658 if (!io_op_defs[i].not_supported)
9659 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9660 }
9661 p->ops_len = i;
9662
9663 ret = 0;
9664 if (copy_to_user(arg, p, size))
9665 ret = -EFAULT;
9666out:
9667 kfree(p);
9668 return ret;
9669}
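
/*
 * Userspace sketch, for reference only, of consuming the probe laid out
 * above; this checks whether IORING_OP_OPENAT is supported. <stdlib.h> and
 * <linux/io_uring.h> are assumed, return values go unchecked:
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, 256);
 *	if (IORING_OP_OPENAT < probe->ops_len &&
 *	    (probe->ops[IORING_OP_OPENAT].flags & IO_URING_OP_SUPPORTED))
 *		openat() may be issued through the ring on this kernel
 */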
9670
Jens Axboe071698e2020-01-28 10:04:42 -07009671static int io_register_personality(struct io_ring_ctx *ctx)
9672{
Jens Axboe4379bf82021-02-15 13:40:22 -07009673 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009674 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009675 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009676
Jens Axboe4379bf82021-02-15 13:40:22 -07009677 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009678
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009679 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9680 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9681 if (!ret)
9682 return id;
9683 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009684 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009685}
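
/*
 * Userspace sketch, for reference only: a personality snapshots the
 * caller's credentials. The returned id can later be placed in
 * sqe->personality, and is dropped again via the unregister op, which
 * passes the id through nr_args:
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_UNREGISTER_PERSONALITY, NULL, id);
 */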
9686
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009687static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9688 unsigned int nr_args)
9689{
9690 struct io_uring_restriction *res;
9691 size_t size;
9692 int i, ret;
9693
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009694 /* Restrictions allowed only if rings started disabled */
9695 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9696 return -EBADFD;
9697
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009698 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009699 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009700 return -EBUSY;
9701
9702 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9703 return -EINVAL;
9704
9705 size = array_size(nr_args, sizeof(*res));
9706 if (size == SIZE_MAX)
9707 return -EOVERFLOW;
9708
9709 res = memdup_user(arg, size);
9710 if (IS_ERR(res))
9711 return PTR_ERR(res);
9712
9713 ret = 0;
9714
9715 for (i = 0; i < nr_args; i++) {
9716 switch (res[i].opcode) {
9717 case IORING_RESTRICTION_REGISTER_OP:
9718 if (res[i].register_op >= IORING_REGISTER_LAST) {
9719 ret = -EINVAL;
9720 goto out;
9721 }
9722
9723 __set_bit(res[i].register_op,
9724 ctx->restrictions.register_op);
9725 break;
9726 case IORING_RESTRICTION_SQE_OP:
9727 if (res[i].sqe_op >= IORING_OP_LAST) {
9728 ret = -EINVAL;
9729 goto out;
9730 }
9731
9732 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9733 break;
9734 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9735 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9736 break;
9737 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9738 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9739 break;
9740 default:
9741 ret = -EINVAL;
9742 goto out;
9743 }
9744 }
9745
9746out:
9747 /* Reset all restrictions if an error happened */
9748 if (ret != 0)
9749 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9750 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009751 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009752
9753 kfree(res);
9754 return ret;
9755}
9756
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}

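/*
 * Most registration opcodes change state that inflight requests may
 * observe, so the ctx is quiesced (percpu refs killed and drained)
 * around them. The opcodes below manage their own lifetimes, or touch
 * nothing the submission path reads, so they skip the expensive drain.
 */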
static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

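/*
 * Core of io_uring_register(2): called with ctx->uring_lock held,
 * quiesces the ctx when the opcode requires it (dropping and retaking
 * the lock around the drain), enforces any registered restrictions,
 * then dispatches on the opcode.
 */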
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		do {
			ret = wait_for_completion_interruptible(&ctx->ref_comp);
			if (!ret)
				break;
			ret = io_run_task_work_sig();
			if (ret < 0)
				break;
		} while (1);

		mutex_lock(&ctx->uring_lock);

		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			goto out_quiesce;
		}
	}

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST) {
			ret = -EINVAL;
			goto out;
		}

		if (!test_bit(opcode, ctx->restrictions.register_op)) {
			ret = -EACCES;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out_quiesce:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

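/*
 * Syscall entry point: resolves the fd, verifies it is an io_uring
 * instance (by checking f_op), runs any pending task work, and forwards
 * to __io_uring_register() under the uring_lock. E.g. (userspace
 * sketch, assuming a raw syscall wrapper):
 *
 *	io_uring_register(ring_fd, IORING_UNREGISTER_BUFFERS, NULL, 0);
 */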
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

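/*
 * Compile-time sanity checks: struct io_uring_sqe is UAPI, so every
 * field offset and size is pinned with BUILD_BUG_ON() to catch layout
 * breakage. Also sets up the slab cache used for struct io_kiocb.
 */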
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
}
__initcall(io_uring_init);