// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
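/*
 * Illustrative userspace sketch of the submission-side ordering described
 * above. This is not part of the kernel and not the liburing implementation;
 * it uses C11 atomics in place of smp_load_acquire()/smp_store_release(),
 * and assumes the pointers (sq_head, sq_tail, sq_ring_mask, sq_ring_entries,
 * sq_array, sqes) and the prepared 'sqe' were derived from the
 * io_sqring_offsets published by io_uring_setup() and the
 * IORING_OFF_SQ_RING/IORING_OFF_SQES mmaps.
 *
 *	unsigned tail = *sq_tail;	// the application owns the SQ tail
 *	unsigned head = atomic_load_explicit((_Atomic unsigned *)sq_head,
 *					     memory_order_acquire);
 *
 *	if (tail - head < *sq_ring_entries) {
 *		unsigned idx = tail & *sq_ring_mask;
 *
 *		sqes[idx] = *sqe;	// fill the SQE before publishing it
 *		sq_array[idx] = idx;
 *		// release store publishes the SQE before the new tail
 *		atomic_store_explicit((_Atomic unsigned *)sq_tail, tail + 1,
 *				      memory_order_release);
 *	}
 */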
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/freezer.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
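/*
 * Illustrative sketch of how these constants are meant to be used (it mirrors
 * the fixed-file lookup done elsewhere in this file): a fixed file index is
 * split into a table index and a slot within that table, so each table of
 * file pointers stays within a single page.
 *
 *	struct file **files = file_data->table[i >> IORING_FILE_TABLE_SHIFT].files;
 *	struct file *file = files[i & IORING_FILE_TABLE_MASK];
 */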
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

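/*
 * Illustrative userspace sketch of reaping completions from the rings
 * described above (not part of the kernel, and not liburing). The kernel
 * publishes cq.tail, so the application loads it with acquire semantics,
 * consumes the entries, and only then publishes the new cq.head. The
 * pointers (cq_head, cq_tail, cq_ring_mask, cqes) are assumed to come from
 * the io_cqring_offsets published by io_uring_setup(); handle_completion()
 * is a placeholder.
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = atomic_load_explicit((_Atomic unsigned *)cq_tail,
 *					     memory_order_acquire);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_ring_mask];
 *
 *		handle_completion(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	// publish the new head only after the entries have been consumed
 *	atomic_store_explicit((_Atomic unsigned *)cq_head, head,
 *			      memory_order_release);
 */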
enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct io_ring_ctx;

struct io_rsrc_put {
	struct list_head list;
	union {
		void *rsrc;
		struct file *file;
	};
};

struct fixed_rsrc_table {
	struct file	**files;
};

struct fixed_rsrc_ref_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	rsrc_list;
	struct fixed_rsrc_data	*rsrc_data;
	void			(*rsrc_put)(struct io_ring_ctx *ctx,
					    struct io_rsrc_put *prsrc);
	struct llist_node	llist;
	bool			done;
};

struct fixed_rsrc_data {
	struct fixed_rsrc_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	struct rw_semaphore	rw_lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_IOPOLL_BATCH		8
#define IO_COMPL_BATCH		32
#define IO_REQ_CACHE_SIZE	32
#define IO_REQ_ALLOC_BATCH	8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	unsigned int		locked_free_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct io_submit_state		submit_state;

	struct io_rings	*rings;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_rsrc_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ref_comp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct xarray		io_buffers;

	struct xarray		personalities;
	u32			pers_next;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct io_restriction		restrictions;

	/* exit task_work */
	struct callback_head		*exit_task_work;

	struct wait_queue_head		hash_wait;

	/* Keep this last, we don't need it for the fast path */
	struct work_struct		exit_work;
	struct list_head		tctx_list;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	int				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

struct io_task_work {
	struct io_wq_work_node	node;
	task_work_func_t	func;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;
	union {
		struct io_task_work	io_task_work;
		struct callback_head	task_work;
	};
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};

Pavel Begunkov7a612352021-03-09 00:37:59 +0000986static bool io_disarm_next(struct io_kiocb *req);
Pavel Begunkovd56d9382021-03-06 11:02:13 +0000987static void io_uring_del_task_file(unsigned long index);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +0000988static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
989 struct task_struct *task,
990 struct files_struct *files);
Jens Axboe37d1e2e2021-02-17 21:03:43 -0700991static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000992static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
Pavel Begunkovbc9744c2021-01-15 17:37:49 +0000993static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
Pavel Begunkov1ffc5422020-12-30 21:34:15 +0000994 struct io_ring_ctx *ctx);
Pavel Begunkovf2303b12021-02-20 18:03:49 +0000995static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +0000996
Pavel Begunkov23faba32021-02-11 18:28:22 +0000997static bool io_rw_reissue(struct io_kiocb *req);
Jens Axboe78e19bb2019-11-06 15:21:34 -0700998static void io_cqring_fill_event(struct io_kiocb *req, long res);
Jackie Liuec9c02a2019-11-08 23:50:36 +0800999static void io_put_req(struct io_kiocb *req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001000static void io_put_req_deferred(struct io_kiocb *req, int nr);
Jens Axboec40f6372020-06-25 15:39:59 -06001001static void io_double_put_req(struct io_kiocb *req);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001002static void io_dismantle_req(struct io_kiocb *req);
1003static void io_put_task(struct task_struct *task, int nr);
1004static void io_queue_next(struct io_kiocb *req);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001005static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
Jens Axboe7271ef32020-08-10 09:55:22 -06001006static void __io_queue_linked_timeout(struct io_kiocb *req);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001007static void io_queue_linked_timeout(struct io_kiocb *req);
Jens Axboe05f3fb32019-12-09 11:22:50 -07001008static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001009 struct io_uring_rsrc_update *ip,
Jens Axboe05f3fb32019-12-09 11:22:50 -07001010 unsigned nr_args);
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001011static void __io_clean_op(struct io_kiocb *req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01001012static struct file *io_file_get(struct io_submit_state *state,
1013 struct io_kiocb *req, int fd, bool fixed);
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00001014static void __io_queue_sqe(struct io_kiocb *req);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001015static void io_rsrc_put_work(struct work_struct *work);
Jens Axboede0617e2019-04-06 21:51:27 -06001016
Pavel Begunkov847595d2021-02-04 13:52:06 +00001017static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
1018 struct iov_iter *iter, bool needs_lock);
Jens Axboeff6165b2020-08-13 09:47:43 -06001019static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
1020 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06001021 struct iov_iter *iter, bool force);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001022static void io_req_task_queue(struct io_kiocb *req);
Jens Axboe65453d12021-02-10 00:03:21 +00001023static void io_submit_flush_completions(struct io_comp_state *cs,
1024 struct io_ring_ctx *ctx);
Jens Axboe9a56a232019-01-09 09:06:50 -07001025
Jens Axboe2b188cc2019-01-07 10:46:33 -07001026static struct kmem_cache *req_cachep;
1027
Jens Axboe09186822020-10-13 15:01:40 -06001028static const struct file_operations io_uring_fops;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001029
1030struct sock *io_uring_get_socket(struct file *file)
1031{
1032#if defined(CONFIG_UNIX)
1033 if (file->f_op == &io_uring_fops) {
1034 struct io_ring_ctx *ctx = file->private_data;
1035
1036 return ctx->ring_sock->sk;
1037 }
1038#endif
1039 return NULL;
1040}
1041EXPORT_SYMBOL(io_uring_get_socket);
1042
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001043#define io_for_each_link(pos, head) \
1044 for (pos = (head); pos; pos = pos->link)
1045
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001046static inline void io_clean_op(struct io_kiocb *req)
1047{
Pavel Begunkov9d5c8192021-01-24 15:08:14 +00001048 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001049 __io_clean_op(req);
1050}
1051
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001052static inline void io_set_resource_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001053{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001054 struct io_ring_ctx *ctx = req->ctx;
1055
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001056 if (!req->fixed_rsrc_refs) {
1057 req->fixed_rsrc_refs = &ctx->file_data->node->refs;
1058 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001059 }
1060}
1061
Pavel Begunkov08d23632020-11-06 13:00:22 +00001062static bool io_match_task(struct io_kiocb *head,
1063 struct task_struct *task,
1064 struct files_struct *files)
1065{
1066 struct io_kiocb *req;
1067
Jens Axboe84965ff2021-01-23 15:51:11 -07001068 if (task && head->task != task) {
1069 /* in terms of cancelation, always match if req task is dead */
1070 if (head->task->flags & PF_EXITING)
1071 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001072 return false;
Jens Axboe84965ff2021-01-23 15:51:11 -07001073 }
Pavel Begunkov08d23632020-11-06 13:00:22 +00001074 if (!files)
1075 return true;
1076
1077 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001078 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001079 return true;
Jens Axboe4379bf82021-02-15 13:40:22 -07001080 if (req->task->files == files)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001081 return true;
1082 }
1083 return false;
1084}
1085
Jens Axboec40f6372020-06-25 15:39:59 -06001086static inline void req_set_fail_links(struct io_kiocb *req)
1087{
1088 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1089 req->flags |= REQ_F_FAIL_LINK;
1090}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001091
Jens Axboe2b188cc2019-01-07 10:46:33 -07001092static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1093{
1094 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1095
Jens Axboe0f158b42020-05-14 17:18:39 -06001096 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001097}
1098
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001099static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1100{
1101 return !req->timeout.off;
1102}
1103
Jens Axboe2b188cc2019-01-07 10:46:33 -07001104static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1105{
1106 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001107 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001108
1109 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1110 if (!ctx)
1111 return NULL;
1112
Jens Axboe78076bb2019-12-04 19:56:40 -07001113 /*
1114 * Use 5 bits less than the max cq entries, that should give us around
1115 * 32 entries per hash list if totally full and uniformly spread.
1116 */
1117 hash_bits = ilog2(p->cq_entries);
1118 hash_bits -= 5;
1119 if (hash_bits <= 0)
1120 hash_bits = 1;
1121 ctx->cancel_hash_bits = hash_bits;
1122 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1123 GFP_KERNEL);
1124 if (!ctx->cancel_hash)
1125 goto err;
1126 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1127
Roman Gushchin21482892019-05-07 10:01:48 -07001128 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001129 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1130 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001131
1132 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001133 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001134 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001135 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001136 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001137 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001138 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001139 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001140 mutex_init(&ctx->uring_lock);
1141 init_waitqueue_head(&ctx->wait);
1142 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001143 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001144 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001145 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -06001146 spin_lock_init(&ctx->inflight_lock);
1147 INIT_LIST_HEAD(&ctx->inflight_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001148 spin_lock_init(&ctx->rsrc_ref_lock);
1149 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001150 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1151 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001152 INIT_LIST_HEAD(&ctx->tctx_list);
Jens Axboe1b4c3512021-02-10 00:03:19 +00001153 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001154 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001155 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001156err:
Jens Axboe78076bb2019-12-04 19:56:40 -07001157 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001158 kfree(ctx);
1159 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001160}
1161
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001162static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001163{
Jens Axboe2bc99302020-07-09 09:43:27 -06001164 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1165 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001166
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001167 return seq != ctx->cached_cq_tail
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001168 + READ_ONCE(ctx->cached_cq_overflow);
Jens Axboe2bc99302020-07-09 09:43:27 -06001169 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001170
Bob Liu9d858b22019-11-13 18:06:25 +08001171 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001172}
1173
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001174static void io_req_track_inflight(struct io_kiocb *req)
1175{
1176 struct io_ring_ctx *ctx = req->ctx;
1177
1178 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001179 req->flags |= REQ_F_INFLIGHT;
1180
1181 spin_lock_irq(&ctx->inflight_lock);
1182 list_add(&req->inflight_entry, &ctx->inflight_list);
1183 spin_unlock_irq(&ctx->inflight_lock);
1184 }
1185}
1186
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001187static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001188{
Jens Axboed3656342019-12-18 09:50:26 -07001189 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001190 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001191
Jens Axboe003e8dc2021-03-06 09:22:27 -07001192 if (!req->work.creds)
1193 req->work.creds = get_current_cred();
1194
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001195 if (req->flags & REQ_F_FORCE_ASYNC)
1196 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1197
Jens Axboed3656342019-12-18 09:50:26 -07001198 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001199 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001200 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboed3656342019-12-18 09:50:26 -07001201 } else {
1202 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001203 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001204 }
Jens Axboe561fb042019-10-24 07:25:42 -06001205}
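/*
 * Note: hashed work (regular files whose handlers set hash_reg_file, or any
 * request on an IOPOLL ring) is keyed on the file's inode so io-wq executes
 * such items one at a time per inode, while non-regular files that allow it
 * are marked IO_WQ_WORK_UNBOUND and go to the unbounded worker pool.
 */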
1206
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001207static void io_prep_async_link(struct io_kiocb *req)
1208{
1209 struct io_kiocb *cur;
1210
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001211 io_for_each_link(cur, req)
1212 io_prep_async_work(cur);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001213}
1214
Pavel Begunkovebf93662021-03-01 18:20:47 +00001215static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001216{
Jackie Liua197f662019-11-08 08:09:12 -07001217 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001218 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001219 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001220
Jens Axboe3bfe6102021-02-16 14:15:30 -07001221 BUG_ON(!tctx);
1222 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001223
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001224 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1225 &req->work, req->flags);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001226 /* init ->work of the whole link before punting */
1227 io_prep_async_link(req);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001228 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001229 if (link)
1230 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001231}
1232
Jens Axboe5262f562019-09-17 12:26:57 -06001233static void io_kill_timeout(struct io_kiocb *req)
1234{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001235 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001236 int ret;
1237
Jens Axboee8c2bc12020-08-15 18:44:09 -07001238 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe5262f562019-09-17 12:26:57 -06001239 if (ret != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001240 atomic_set(&req->ctx->cq_timeouts,
1241 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001242 list_del_init(&req->timeout.list);
Jens Axboe78e19bb2019-11-06 15:21:34 -07001243 io_cqring_fill_event(req, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001244 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001245 }
1246}
1247
Jens Axboe76e1b642020-09-26 15:05:03 -06001248/*
1249 * Returns true if we found and killed one or more timeouts
1250 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00001251static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
1252 struct files_struct *files)
Jens Axboe5262f562019-09-17 12:26:57 -06001253{
1254 struct io_kiocb *req, *tmp;
Jens Axboe76e1b642020-09-26 15:05:03 -06001255 int canceled = 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001256
1257 spin_lock_irq(&ctx->completion_lock);
Jens Axboef3606e32020-09-22 08:18:24 -06001258 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00001259 if (io_match_task(req, tsk, files)) {
Jens Axboef3606e32020-09-22 08:18:24 -06001260 io_kill_timeout(req);
Jens Axboe76e1b642020-09-26 15:05:03 -06001261 canceled++;
1262 }
Jens Axboef3606e32020-09-22 08:18:24 -06001263 }
Jens Axboe5262f562019-09-17 12:26:57 -06001264 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe76e1b642020-09-26 15:05:03 -06001265 return canceled != 0;
Jens Axboe5262f562019-09-17 12:26:57 -06001266}
1267
Pavel Begunkov04518942020-05-26 20:34:05 +03001268static void __io_queue_deferred(struct io_ring_ctx *ctx)
1269{
1270 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001271 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1272 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001273
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001274 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001275 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001276 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001277 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001278 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001279 } while (!list_empty(&ctx->defer_list));
1280}
1281
Pavel Begunkov360428f2020-05-30 14:54:17 +03001282static void io_flush_timeouts(struct io_ring_ctx *ctx)
1283{
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001284 u32 seq;
1285
1286 if (list_empty(&ctx->timeout_list))
1287 return;
1288
1289 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1290
1291 do {
1292 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001293 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001294 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001295
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001296 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001297 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001298
1299 /*
1300 * Since seq can easily wrap around over time, subtract
1301 * the last seq at which timeouts were flushed before comparing.
1302 * Assuming not more than 2^31-1 events have happened since,
1303 * these subtractions won't have wrapped, so we can check if
1304 * target is in [last_seq, current_seq] by comparing the two.
1305 */
1306 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1307 events_got = seq - ctx->cq_last_tm_flush;
1308 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001309 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001310
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001311 list_del_init(&req->timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001312 io_kill_timeout(req);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001313 } while (!list_empty(&ctx->timeout_list));
1314
1315 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001316}
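/*
 * Worked example of the wrap-safe comparison above (illustrative numbers):
 * with cq_last_tm_flush == 0xfffffff0, a timeout target_seq == 0x00000005
 * and current seq == 0x00000010, events_needed == 0x15 and events_got ==
 * 0x20, so the timeout is flushed even though the raw sequences wrapped.
 */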
1317
Jens Axboede0617e2019-04-06 21:51:27 -06001318static void io_commit_cqring(struct io_ring_ctx *ctx)
1319{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001320 io_flush_timeouts(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001321
1322 /* order cqe stores with ring update */
1323 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001324
Pavel Begunkov04518942020-05-26 20:34:05 +03001325 if (unlikely(!list_empty(&ctx->defer_list)))
1326 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001327}
1328
Jens Axboe90554202020-09-03 12:12:41 -06001329static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1330{
1331 struct io_rings *r = ctx->rings;
1332
1333 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1334}
1335
Pavel Begunkov888aae22021-01-19 13:32:39 +00001336static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1337{
1338 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1339}
1340
Jens Axboe2b188cc2019-01-07 10:46:33 -07001341static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1342{
Hristo Venev75b28af2019-08-26 17:23:46 +00001343 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001344 unsigned tail;
1345
Stefan Bühler115e12e2019-04-24 23:54:18 +02001346 /*
1347 * writes to the cq entry need to come after reading head; the
1348 * control dependency is enough as we're using WRITE_ONCE to
1349 * fill the cq entry
1350 */
Pavel Begunkov888aae22021-01-19 13:32:39 +00001351 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001352 return NULL;
1353
Pavel Begunkov888aae22021-01-19 13:32:39 +00001354 tail = ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001355 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001356}
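/*
 * The CQ ring size is a power of two and cq_mask == cq_ring_entries - 1, so
 * "tail & ctx->cq_mask" folds the ever-increasing cached_cq_tail into a slot
 * index; e.g. with 8 entries, tails 7, 8 and 9 land in slots 7, 0 and 1.
 */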
1357
Jens Axboef2842ab2020-01-08 11:04:00 -07001358static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1359{
Jens Axboef0b493e2020-02-01 21:30:11 -07001360 if (!ctx->cq_ev_fd)
1361 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001362 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1363 return false;
Jens Axboef2842ab2020-01-08 11:04:00 -07001364 if (!ctx->eventfd_async)
1365 return true;
Jens Axboeb41e9852020-02-17 09:52:41 -07001366 return io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001367}
1368
Jens Axboeb41e9852020-02-17 09:52:41 -07001369static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001370{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001371 /* see waitqueue_active() comment */
1372 smp_mb();
1373
Jens Axboe8c838782019-03-12 15:48:16 -06001374 if (waitqueue_active(&ctx->wait))
1375 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001376 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1377 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001378 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001379 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001380 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001381 wake_up_interruptible(&ctx->cq_wait);
1382 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1383 }
Jens Axboe8c838782019-03-12 15:48:16 -06001384}
1385
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001386static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1387{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001388 /* see waitqueue_active() comment */
1389 smp_mb();
1390
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001391 if (ctx->flags & IORING_SETUP_SQPOLL) {
1392 if (waitqueue_active(&ctx->wait))
1393 wake_up(&ctx->wait);
1394 }
1395 if (io_should_trigger_evfd(ctx))
1396 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001397 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001398 wake_up_interruptible(&ctx->cq_wait);
1399 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1400 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001401}
1402
Jens Axboec4a2ed72019-11-21 21:01:26 -07001403/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c503152021-01-04 20:36:36 +00001404static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1405 struct task_struct *tsk,
1406 struct files_struct *files)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001407{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001408 struct io_rings *rings = ctx->rings;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001409 struct io_kiocb *req, *tmp;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001410 struct io_uring_cqe *cqe;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001411 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001412 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001413 LIST_HEAD(list);
1414
Pavel Begunkove23de152020-12-17 00:24:37 +00001415 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1416 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001417
Jens Axboeb18032b2021-01-24 16:58:56 -07001418 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001419 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboee6c8aa92020-09-28 13:10:13 -06001420 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00001421 if (!io_match_task(req, tsk, files))
Jens Axboee6c8aa92020-09-28 13:10:13 -06001422 continue;
1423
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001424 cqe = io_get_cqring(ctx);
1425 if (!cqe && !force)
1426 break;
1427
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001428 list_move(&req->compl.list, &list);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001429 if (cqe) {
1430 WRITE_ONCE(cqe->user_data, req->user_data);
1431 WRITE_ONCE(cqe->res, req->result);
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001432 WRITE_ONCE(cqe->flags, req->compl.cflags);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001433 } else {
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001434 ctx->cached_cq_overflow++;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001435 WRITE_ONCE(ctx->rings->cq_overflow,
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001436 ctx->cached_cq_overflow);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001437 }
Jens Axboeb18032b2021-01-24 16:58:56 -07001438 posted = true;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001439 }
1440
Pavel Begunkov09e88402020-12-17 00:24:38 +00001441 all_flushed = list_empty(&ctx->cq_overflow_list);
1442 if (all_flushed) {
1443 clear_bit(0, &ctx->sq_check_overflow);
1444 clear_bit(0, &ctx->cq_check_overflow);
1445 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1446 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001447
Jens Axboeb18032b2021-01-24 16:58:56 -07001448 if (posted)
1449 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001450 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001451 if (posted)
1452 io_cqring_ev_posted(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001453
1454 while (!list_empty(&list)) {
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001455 req = list_first_entry(&list, struct io_kiocb, compl.list);
1456 list_del(&req->compl.list);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001457 io_put_req(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001458 }
Jens Axboec4a2ed72019-11-21 21:01:26 -07001459
Pavel Begunkov09e88402020-12-17 00:24:38 +00001460 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001461}
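/*
 * Userspace view of the above (illustrative sketch, not taken from this
 * file): once IORING_SQ_CQ_OVERFLOW is set in the SQ ring flags, simply
 * entering the kernel, e.g. with IORING_ENTER_GETEVENTS, lets the backlog
 * be flushed into the CQ ring as entries free up:
 *
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_CQ_OVERFLOW)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
 */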
1462
Jens Axboeca0a2652021-03-04 17:15:48 -07001463static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
Pavel Begunkov6c503152021-01-04 20:36:36 +00001464 struct task_struct *tsk,
1465 struct files_struct *files)
1466{
Jens Axboeca0a2652021-03-04 17:15:48 -07001467 bool ret = true;
1468
Pavel Begunkov6c503152021-01-04 20:36:36 +00001469 if (test_bit(0, &ctx->cq_check_overflow)) {
1470 /* iopoll syncs against uring_lock, not completion_lock */
1471 if (ctx->flags & IORING_SETUP_IOPOLL)
1472 mutex_lock(&ctx->uring_lock);
Jens Axboeca0a2652021-03-04 17:15:48 -07001473 ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001474 if (ctx->flags & IORING_SETUP_IOPOLL)
1475 mutex_unlock(&ctx->uring_lock);
1476 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001477
1478 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001479}
1480
Jens Axboebcda7ba2020-02-23 16:42:51 -07001481static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001482{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001483 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001484 struct io_uring_cqe *cqe;
1485
Jens Axboe78e19bb2019-11-06 15:21:34 -07001486 trace_io_uring_complete(ctx, req->user_data, res);
Jens Axboe51c3ff62019-11-03 06:52:50 -07001487
Jens Axboe2b188cc2019-01-07 10:46:33 -07001488 /*
1489 * If we can't get a cq entry, userspace overflowed the
1490 * submission (by quite a lot). Increment the overflow count in
1491 * the ring.
1492 */
1493 cqe = io_get_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001494 if (likely(cqe)) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001495 WRITE_ONCE(cqe->user_data, req->user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001496 WRITE_ONCE(cqe->res, res);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001497 WRITE_ONCE(cqe->flags, cflags);
Jens Axboefdaf0832020-10-30 09:37:30 -06001498 } else if (ctx->cq_overflow_flushed ||
1499 atomic_read(&req->task->io_uring->in_idle)) {
Jens Axboe0f212202020-09-13 13:09:39 -06001500 /*
1501 * If we're in ring overflow flush mode, or in task cancel mode,
1502 * then we cannot store the request for later flushing, we need
1503 * to drop it on the floor.
1504 */
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001505 ctx->cached_cq_overflow++;
1506 WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001507 } else {
Jens Axboead3eb2c2019-12-18 17:12:20 -07001508 if (list_empty(&ctx->cq_overflow_list)) {
1509 set_bit(0, &ctx->sq_check_overflow);
1510 set_bit(0, &ctx->cq_check_overflow);
Xiaoguang Wang6d5f9042020-07-09 09:15:29 +08001511 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
Jens Axboead3eb2c2019-12-18 17:12:20 -07001512 }
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001513 io_clean_op(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001514 req->result = res;
Pavel Begunkov0f7e4662020-07-13 23:37:16 +03001515 req->compl.cflags = cflags;
Pavel Begunkov40d8ddd2020-07-13 23:37:11 +03001516 refcount_inc(&req->refs);
1517 list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001518 }
1519}
1520
Jens Axboebcda7ba2020-02-23 16:42:51 -07001521static void io_cqring_fill_event(struct io_kiocb *req, long res)
1522{
1523 __io_cqring_fill_event(req, res, 0);
1524}
1525
Pavel Begunkov7a612352021-03-09 00:37:59 +00001526static void io_req_complete_post(struct io_kiocb *req, long res,
1527 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001528{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001529 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001530 unsigned long flags;
1531
1532 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001533 __io_cqring_fill_event(req, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001534 /*
1535 * If we're the last reference to this request, add to our locked
1536 * free_list cache.
1537 */
1538 if (refcount_dec_and_test(&req->refs)) {
1539 struct io_comp_state *cs = &ctx->submit_state.comp;
1540
Pavel Begunkov7a612352021-03-09 00:37:59 +00001541 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1542 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
1543 io_disarm_next(req);
1544 if (req->link) {
1545 io_req_task_queue(req->link);
1546 req->link = NULL;
1547 }
1548 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001549 io_dismantle_req(req);
1550 io_put_task(req->task, 1);
1551 list_add(&req->compl.list, &cs->locked_free_list);
1552 cs->locked_free_nr++;
1553 } else
1554 req = NULL;
Pavel Begunkov7a612352021-03-09 00:37:59 +00001555 io_commit_cqring(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001556 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboe8c838782019-03-12 15:48:16 -06001557 io_cqring_ev_posted(ctx);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001558
1559 if (req)
Jens Axboec7dae4b2021-02-09 19:53:37 -07001560 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001561}
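/*
 * Note: when io_req_complete_post() drops the last reference, the request
 * is parked on the locked free list for reuse instead of going through
 * __io_free_req(), so the ctx reference that would otherwise be dropped
 * there is released explicitly after the completion lock is unlocked.
 */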
1562
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001563static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001564 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001565{
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001566 io_clean_op(req);
1567 req->result = res;
1568 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001569 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001570}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001571
Pavel Begunkov889fca72021-02-10 00:03:09 +00001572static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1573 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001574{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001575 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1576 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001577 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001578 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001579}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001580
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001581static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001582{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001583 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001584}
1585
Jens Axboec7dae4b2021-02-09 19:53:37 -07001586static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001587{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001588 struct io_submit_state *state = &ctx->submit_state;
1589 struct io_comp_state *cs = &state->comp;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001590 struct io_kiocb *req = NULL;
1591
Jens Axboec7dae4b2021-02-09 19:53:37 -07001592 /*
1593 * If we have more than a batch's worth of requests in our IRQ side
1594 * locked cache, grab the lock and move them over to our submission
1595 * side cache.
1596 */
1597 if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
1598 spin_lock_irq(&ctx->completion_lock);
1599 list_splice_init(&cs->locked_free_list, &cs->free_list);
1600 cs->locked_free_nr = 0;
1601 spin_unlock_irq(&ctx->completion_lock);
1602 }
1603
1604 while (!list_empty(&cs->free_list)) {
1605 req = list_first_entry(&cs->free_list, struct io_kiocb,
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001606 compl.list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001607 list_del(&req->compl.list);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001608 state->reqs[state->free_reqs++] = req;
1609 if (state->free_reqs == ARRAY_SIZE(state->reqs))
1610 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001611 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001612
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001613 return req != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001614}
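/*
 * Request allocation therefore has three tiers: the submission-side reqs[]
 * array, the free_list refilled above from the IRQ-side locked cache, and
 * finally the slab cache hit from io_alloc_req() below.
 */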
1615
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001616static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001617{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001618 struct io_submit_state *state = &ctx->submit_state;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001619
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001620 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
Jens Axboe2b188cc2019-01-07 10:46:33 -07001621
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001622 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001623 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001624 int ret;
1625
Jens Axboec7dae4b2021-02-09 19:53:37 -07001626 if (io_flush_cached_reqs(ctx))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001627 goto got_req;
1628
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001629 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1630 state->reqs);
Jens Axboefd6fab22019-03-14 16:30:06 -06001631
1632 /*
1633 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1634 * retry single alloc to be on the safe side.
1635 */
1636 if (unlikely(ret <= 0)) {
1637 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1638 if (!state->reqs[0])
Pavel Begunkov3893f392021-02-10 00:03:15 +00001639 return NULL;
Jens Axboefd6fab22019-03-14 16:30:06 -06001640 ret = 1;
1641 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001642 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001643 }
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001644got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001645 state->free_reqs--;
1646 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001647}
1648
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001649static inline void io_put_file(struct io_kiocb *req, struct file *file,
1650 bool fixed)
1651{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001652 if (!fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001653 fput(file);
1654}
1655
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001656static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001657{
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03001658 io_clean_op(req);
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001659
Jens Axboee8c2bc12020-08-15 18:44:09 -07001660 if (req->async_data)
1661 kfree(req->async_data);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001662 if (req->file)
1663 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001664 if (req->fixed_rsrc_refs)
1665 percpu_ref_put(req->fixed_rsrc_refs);
Jens Axboe003e8dc2021-03-06 09:22:27 -07001666 if (req->work.creds) {
1667 put_cred(req->work.creds);
1668 req->work.creds = NULL;
1669 }
Pavel Begunkovf85c3102021-03-01 18:20:46 +00001670
1671 if (req->flags & REQ_F_INFLIGHT) {
1672 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf85c3102021-03-01 18:20:46 +00001673 unsigned long flags;
1674
1675 spin_lock_irqsave(&ctx->inflight_lock, flags);
1676 list_del(&req->inflight_entry);
1677 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1678 req->flags &= ~REQ_F_INFLIGHT;
Pavel Begunkovf85c3102021-03-01 18:20:46 +00001679 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001680}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001681
Pavel Begunkovb23fcf42021-03-01 18:20:48 +00001682/* must be called fairly soon after putting a request */
Pavel Begunkov7c660732021-01-25 11:42:21 +00001683static inline void io_put_task(struct task_struct *task, int nr)
1684{
1685 struct io_uring_task *tctx = task->io_uring;
1686
1687 percpu_counter_sub(&tctx->inflight, nr);
1688 if (unlikely(atomic_read(&tctx->in_idle)))
1689 wake_up(&tctx->wait);
1690 put_task_struct_many(task, nr);
1691}
1692
Pavel Begunkov216578e2020-10-13 09:44:00 +01001693static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001694{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001695 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001696
Pavel Begunkov216578e2020-10-13 09:44:00 +01001697 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001698 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001699
Pavel Begunkov3893f392021-02-10 00:03:15 +00001700 kmem_cache_free(req_cachep, req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001701 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001702}
1703
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001704static inline void io_remove_next_linked(struct io_kiocb *req)
1705{
1706 struct io_kiocb *nxt = req->link;
1707
1708 req->link = nxt->link;
1709 nxt->link = NULL;
1710}
1711
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001712static bool io_kill_linked_timeout(struct io_kiocb *req)
1713 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001714{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001715 struct io_kiocb *link = req->link;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001716 bool cancelled = false;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001717
Pavel Begunkov900fad42020-10-19 16:39:16 +01001718 /*
1719 * Can happen if a linked timeout fired and the link chain looked like
1720 * req -> link t-out -> link t-out [-> ...]
1721 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001722 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1723 struct io_timeout_data *io = link->async_data;
1724 int ret;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001725
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001726 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001727 link->timeout.head = NULL;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001728 ret = hrtimer_try_to_cancel(&io->timer);
1729 if (ret != -1) {
1730 io_cqring_fill_event(link, -ECANCELED);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001731 io_put_req_deferred(link, 1);
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001732 cancelled = true;
1733 }
1734 }
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001735 req->flags &= ~REQ_F_LINK_TIMEOUT;
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001736 return cancelled;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001737}
1738
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001739static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001740 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001741{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001742 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001743
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001744 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001745 while (link) {
1746 nxt = link->link;
1747 link->link = NULL;
1748
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001749 trace_io_uring_fail_link(req, link);
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001750 io_cqring_fill_event(link, -ECANCELED);
Jens Axboe1575f212021-02-27 15:20:49 -07001751 io_put_req_deferred(link, 2);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001752 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001753 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001754}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001755
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001756static bool io_disarm_next(struct io_kiocb *req)
1757 __must_hold(&req->ctx->completion_lock)
1758{
1759 bool posted = false;
1760
1761 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1762 posted = io_kill_linked_timeout(req);
1763 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1764 posted |= (req->link != NULL);
1765 io_fail_links(req);
1766 }
1767 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001768}
1769
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001770static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001771{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001772 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001773
Jens Axboe9e645e112019-05-10 16:07:28 -06001774 /*
1775 * If LINK is set, we have dependent requests in this chain. If we
1776 * didn't fail this request, queue the first one up, moving any other
1777 * dependencies to the next request. In case of failure, fail the rest
1778 * of the chain.
1779 */
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001780 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
1781 struct io_ring_ctx *ctx = req->ctx;
1782 unsigned long flags;
1783 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001784
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001785 spin_lock_irqsave(&ctx->completion_lock, flags);
1786 posted = io_disarm_next(req);
1787 if (posted)
1788 io_commit_cqring(req->ctx);
1789 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1790 if (posted)
1791 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001792 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001793 nxt = req->link;
1794 req->link = NULL;
1795 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001796}
Jens Axboe2665abf2019-11-05 12:40:47 -07001797
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001798static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001799{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001800 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001801 return NULL;
1802 return __io_req_find_next(req);
1803}
1804
Pavel Begunkov2c323952021-02-28 22:04:53 +00001805static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1806{
1807 if (!ctx)
1808 return;
1809 if (ctx->submit_state.comp.nr) {
1810 mutex_lock(&ctx->uring_lock);
1811 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
1812 mutex_unlock(&ctx->uring_lock);
1813 }
1814 percpu_ref_put(&ctx->refs);
1815}
1816
Jens Axboe7cbf1722021-02-10 00:03:20 +00001817static bool __tctx_task_work(struct io_uring_task *tctx)
1818{
Jens Axboe65453d12021-02-10 00:03:21 +00001819 struct io_ring_ctx *ctx = NULL;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001820 struct io_wq_work_list list;
1821 struct io_wq_work_node *node;
1822
1823 if (wq_list_empty(&tctx->task_list))
1824 return false;
1825
Jens Axboe0b81e802021-02-16 10:33:53 -07001826 spin_lock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001827 list = tctx->task_list;
1828 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001829 spin_unlock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001830
1831 node = list.first;
1832 while (node) {
1833 struct io_wq_work_node *next = node->next;
1834 struct io_kiocb *req;
1835
1836 req = container_of(node, struct io_kiocb, io_task_work.node);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001837 if (req->ctx != ctx) {
1838 ctx_flush_and_put(ctx);
1839 ctx = req->ctx;
1840 percpu_ref_get(&ctx->refs);
1841 }
1842
Jens Axboe7cbf1722021-02-10 00:03:20 +00001843 req->task_work.func(&req->task_work);
1844 node = next;
Jens Axboe65453d12021-02-10 00:03:21 +00001845 }
1846
Pavel Begunkov2c323952021-02-28 22:04:53 +00001847 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001848 return list.first != NULL;
1849}
1850
1851static void tctx_task_work(struct callback_head *cb)
1852{
1853 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
1854
Jens Axboe1d5f3602021-02-26 14:54:16 -07001855 clear_bit(0, &tctx->task_state);
1856
Jens Axboe7cbf1722021-02-10 00:03:20 +00001857 while (__tctx_task_work(tctx))
1858 cond_resched();
Jens Axboe7cbf1722021-02-10 00:03:20 +00001859}
1860
1861static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
1862 enum task_work_notify_mode notify)
1863{
1864 struct io_uring_task *tctx = tsk->io_uring;
1865 struct io_wq_work_node *node, *prev;
Jens Axboe0b81e802021-02-16 10:33:53 -07001866 unsigned long flags;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001867 int ret;
1868
1869 WARN_ON_ONCE(!tctx);
1870
Jens Axboe0b81e802021-02-16 10:33:53 -07001871 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001872 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001873 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001874
1875 /* task_work already pending, we're done */
1876 if (test_bit(0, &tctx->task_state) ||
1877 test_and_set_bit(0, &tctx->task_state))
1878 return 0;
1879
1880 if (!task_work_add(tsk, &tctx->task_work, notify))
1881 return 0;
1882
1883 /*
1884 * Slow path - we failed; find and delete the work. If the work is not
1885 * in the list, it already ran and we're fine.
1886 */
1887 ret = 0;
Jens Axboe0b81e802021-02-16 10:33:53 -07001888 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001889 wq_list_for_each(node, prev, &tctx->task_list) {
1890 if (&req->io_task_work.node == node) {
1891 wq_list_del(&tctx->task_list, node, prev);
1892 ret = 1;
1893 break;
1894 }
1895 }
Jens Axboe0b81e802021-02-16 10:33:53 -07001896 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001897 clear_bit(0, &tctx->task_state);
1898 return ret;
1899}
1900
Jens Axboe355fb9e2020-10-22 20:19:35 -06001901static int io_req_task_work_add(struct io_kiocb *req)
Jens Axboec2c4c832020-07-01 15:37:11 -06001902{
1903 struct task_struct *tsk = req->task;
1904 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe91989c72020-10-16 09:02:26 -06001905 enum task_work_notify_mode notify;
1906 int ret;
Jens Axboec2c4c832020-07-01 15:37:11 -06001907
Jens Axboe6200b0a2020-09-13 14:38:30 -06001908 if (tsk->flags & PF_EXITING)
1909 return -ESRCH;
1910
Jens Axboec2c4c832020-07-01 15:37:11 -06001911 /*
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06001912 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1913 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1914 * processing task_work. There's no reliable way to tell if TWA_RESUME
1915 * will do the job.
Jens Axboec2c4c832020-07-01 15:37:11 -06001916 */
Jens Axboe91989c72020-10-16 09:02:26 -06001917 notify = TWA_NONE;
Jens Axboe355fb9e2020-10-22 20:19:35 -06001918 if (!(ctx->flags & IORING_SETUP_SQPOLL))
Jens Axboec2c4c832020-07-01 15:37:11 -06001919 notify = TWA_SIGNAL;
1920
Jens Axboe7cbf1722021-02-10 00:03:20 +00001921 ret = io_task_work_add(tsk, req, notify);
Jens Axboec2c4c832020-07-01 15:37:11 -06001922 if (!ret)
1923 wake_up_process(tsk);
Jens Axboe0ba9c9e2020-08-06 19:41:50 -06001924
Jens Axboec2c4c832020-07-01 15:37:11 -06001925 return ret;
1926}
1927
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001928static void io_req_task_work_add_fallback(struct io_kiocb *req,
Jens Axboe7cbf1722021-02-10 00:03:20 +00001929 task_work_func_t cb)
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001930{
Jens Axboe7c25c0d2021-02-16 07:17:00 -07001931 struct io_ring_ctx *ctx = req->ctx;
1932 struct callback_head *head;
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001933
1934 init_task_work(&req->task_work, cb);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07001935 do {
1936 head = READ_ONCE(ctx->exit_task_work);
1937 req->task_work.next = head;
1938 } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001939}
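/*
 * The cmpxchg() loop above is a lock-free LIFO push: the callback is aimed
 * at the current ctx->exit_task_work head and only published if the head
 * did not change in the meantime, retrying on contention.
 */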
1940
Jens Axboec40f6372020-06-25 15:39:59 -06001941static void __io_req_task_cancel(struct io_kiocb *req, int error)
1942{
1943 struct io_ring_ctx *ctx = req->ctx;
1944
1945 spin_lock_irq(&ctx->completion_lock);
1946 io_cqring_fill_event(req, error);
1947 io_commit_cqring(ctx);
1948 spin_unlock_irq(&ctx->completion_lock);
1949
1950 io_cqring_ev_posted(ctx);
1951 req_set_fail_links(req);
1952 io_double_put_req(req);
1953}
1954
1955static void io_req_task_cancel(struct callback_head *cb)
1956{
1957 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06001958 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06001959
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00001960 mutex_lock(&ctx->uring_lock);
Pavel Begunkova3df76982021-02-18 22:32:52 +00001961 __io_req_task_cancel(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00001962 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06001963 percpu_ref_put(&ctx->refs);
Jens Axboec40f6372020-06-25 15:39:59 -06001964}
1965
1966static void __io_req_task_submit(struct io_kiocb *req)
1967{
1968 struct io_ring_ctx *ctx = req->ctx;
1969
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00001970	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00001971 mutex_lock(&ctx->uring_lock);
Pavel Begunkov70aacfe2021-03-01 13:02:15 +00001972 if (!(current->flags & PF_EXITING) && !current->in_execve)
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00001973 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00001974 else
Jens Axboec40f6372020-06-25 15:39:59 -06001975 __io_req_task_cancel(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00001976 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06001977}
1978
Jens Axboec40f6372020-06-25 15:39:59 -06001979static void io_req_task_submit(struct callback_head *cb)
1980{
1981 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
1982
1983 __io_req_task_submit(req);
1984}
1985
1986static void io_req_task_queue(struct io_kiocb *req)
1987{
Jens Axboec40f6372020-06-25 15:39:59 -06001988 int ret;
1989
Jens Axboe7cbf1722021-02-10 00:03:20 +00001990 req->task_work.func = io_req_task_submit;
Jens Axboe355fb9e2020-10-22 20:19:35 -06001991 ret = io_req_task_work_add(req);
Jens Axboec40f6372020-06-25 15:39:59 -06001992 if (unlikely(ret)) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00001993 req->result = -ECANCELED;
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00001994 percpu_ref_get(&req->ctx->refs);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001995 io_req_task_work_add_fallback(req, io_req_task_cancel);
Jens Axboec40f6372020-06-25 15:39:59 -06001996 }
Jens Axboec40f6372020-06-25 15:39:59 -06001997}
1998
Pavel Begunkova3df76982021-02-18 22:32:52 +00001999static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2000{
2001 percpu_ref_get(&req->ctx->refs);
2002 req->result = ret;
2003 req->task_work.func = io_req_task_cancel;
2004
2005 if (unlikely(io_req_task_work_add(req)))
2006 io_req_task_work_add_fallback(req, io_req_task_cancel);
2007}
2008
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002009static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002010{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002011 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002012
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002013 if (nxt)
2014 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002015}
2016
Jens Axboe9e645e112019-05-10 16:07:28 -06002017static void io_free_req(struct io_kiocb *req)
2018{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002019 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002020 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002021}
2022
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002023struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002024 struct task_struct *task;
2025 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002026 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002027};
2028
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002029static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002030{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002031 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002032 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002033 rb->task = NULL;
2034}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002035
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002036static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2037 struct req_batch *rb)
2038{
Pavel Begunkov6e833d52021-02-11 18:28:20 +00002039 if (rb->task)
Pavel Begunkov7c660732021-01-25 11:42:21 +00002040 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002041 if (rb->ctx_refs)
2042 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002043}
2044
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002045static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2046 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002047{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002048 io_queue_next(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002049
Jens Axboee3bc8e92020-09-24 08:45:57 -06002050 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002051 if (rb->task)
2052 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002053 rb->task = req->task;
2054 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002055 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002056 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002057 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002058
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002059 io_dismantle_req(req);
Pavel Begunkovbd759042021-02-12 03:23:50 +00002060 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002061 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002062 else
2063 list_add(&req->compl.list, &state->comp.free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002064}
2065
Pavel Begunkov905c1722021-02-10 00:03:14 +00002066static void io_submit_flush_completions(struct io_comp_state *cs,
2067 struct io_ring_ctx *ctx)
2068{
2069 int i, nr = cs->nr;
2070 struct io_kiocb *req;
2071 struct req_batch rb;
2072
2073 io_init_req_batch(&rb);
2074 spin_lock_irq(&ctx->completion_lock);
2075 for (i = 0; i < nr; i++) {
2076 req = cs->reqs[i];
2077 __io_cqring_fill_event(req, req->result, req->compl.cflags);
2078 }
2079 io_commit_cqring(ctx);
2080 spin_unlock_irq(&ctx->completion_lock);
2081
2082 io_cqring_ev_posted(ctx);
2083 for (i = 0; i < nr; i++) {
2084 req = cs->reqs[i];
2085
2086 /* submission and completion refs */
2087 if (refcount_sub_and_test(2, &req->refs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002088 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002089 }
2090
2091 io_req_free_batch_finish(ctx, &rb);
2092 cs->nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002093}
2094
Jens Axboeba816ad2019-09-28 11:36:45 -06002095/*
2096 * Drop reference to request, return next in chain (if there is one) if this
2097 * was the last reference to this request.
2098 */
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002099static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002100{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002101 struct io_kiocb *nxt = NULL;
2102
Jens Axboe2a44f462020-02-25 13:25:41 -07002103 if (refcount_dec_and_test(&req->refs)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002104 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002105 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002106 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002107 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002108}
2109
Jens Axboe2b188cc2019-01-07 10:46:33 -07002110static void io_put_req(struct io_kiocb *req)
2111{
Jens Axboedef596e2019-01-09 08:59:42 -07002112 if (refcount_dec_and_test(&req->refs))
2113 io_free_req(req);
2114}
2115
Pavel Begunkov216578e2020-10-13 09:44:00 +01002116static void io_put_req_deferred_cb(struct callback_head *cb)
2117{
2118 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2119
2120 io_free_req(req);
2121}
2122
2123static void io_free_req_deferred(struct io_kiocb *req)
2124{
2125 int ret;
2126
Jens Axboe7cbf1722021-02-10 00:03:20 +00002127 req->task_work.func = io_put_req_deferred_cb;
Jens Axboe355fb9e2020-10-22 20:19:35 -06002128 ret = io_req_task_work_add(req);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002129 if (unlikely(ret))
2130 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002131}
2132
2133static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2134{
2135 if (refcount_sub_and_test(refs, &req->refs))
2136 io_free_req_deferred(req);
2137}
2138
Jens Axboe978db572019-11-14 22:39:04 -07002139static void io_double_put_req(struct io_kiocb *req)
2140{
2141 /* drop both submit and complete references */
2142 if (refcount_sub_and_test(2, &req->refs))
2143 io_free_req(req);
2144}
2145
Pavel Begunkov6c503152021-01-04 20:36:36 +00002146static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002147{
2148 /* See comment at the top of this file */
2149 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002150 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002151}
2152
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002153static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2154{
2155 struct io_rings *rings = ctx->rings;
2156
2157 /* make sure SQ entry isn't read before tail */
2158 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2159}
2160
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002161static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002162{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002163 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002164
Jens Axboebcda7ba2020-02-23 16:42:51 -07002165 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2166 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002167 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002168 kfree(kbuf);
2169 return cflags;
2170}
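/*
 * Illustrative sketch of the userspace side (not taken from this file): a
 * provided-buffer completion carries the buffer ID in the upper bits of
 * cqe->flags, mirroring the encoding above:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		... look up provided buffer 'bid' for the data ...
 *	}
 */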
2171
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002172static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2173{
2174 struct io_buffer *kbuf;
2175
2176 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2177 return io_put_kbuf(req, kbuf);
2178}
2179
Jens Axboe4c6e2772020-07-01 11:29:10 -06002180static inline bool io_run_task_work(void)
2181{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002182 /*
2183 * Not safe to run on exiting task, and the task_work handling will
2184 * not add work to such a task.
2185 */
2186 if (unlikely(current->flags & PF_EXITING))
2187 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002188 if (current->task_works) {
2189 __set_current_state(TASK_RUNNING);
2190 task_work_run();
2191 return true;
2192 }
2193
2194 return false;
2195}
2196
Jens Axboedef596e2019-01-09 08:59:42 -07002197/*
2198 * Find and free completed poll iocbs
2199 */
2200static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2201 struct list_head *done)
2202{
Jens Axboe8237e042019-12-28 10:48:22 -07002203 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002204 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002205
2206 /* order with ->result store in io_complete_rw_iopoll() */
2207 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002208
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002209 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002210 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002211 int cflags = 0;
2212
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002213 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002214 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002215
Pavel Begunkovf1613402021-02-11 18:28:21 +00002216 if (READ_ONCE(req->result) == -EAGAIN) {
2217 req->iopoll_completed = 0;
Pavel Begunkov23faba32021-02-11 18:28:22 +00002218 if (io_rw_reissue(req))
Pavel Begunkovf1613402021-02-11 18:28:21 +00002219 continue;
2220 }
2221
Jens Axboebcda7ba2020-02-23 16:42:51 -07002222 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002223 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002224
2225 __io_cqring_fill_event(req, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002226 (*nr_events)++;
2227
Pavel Begunkovc3524382020-06-28 12:52:32 +03002228 if (refcount_dec_and_test(&req->refs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002229 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002230 }
Jens Axboedef596e2019-01-09 08:59:42 -07002231
Jens Axboe09bb8392019-03-13 12:39:28 -06002232 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002233 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002234 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002235}
2236
Jens Axboedef596e2019-01-09 08:59:42 -07002237static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2238 long min)
2239{
2240 struct io_kiocb *req, *tmp;
2241 LIST_HEAD(done);
2242 bool spin;
2243 int ret;
2244
2245 /*
2246 * Only spin for completions if we don't have multiple devices hanging
2247 * off our complete list, and we're under the requested amount.
2248 */
2249 spin = !ctx->poll_multi_file && *nr_events < min;
2250
2251 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002252 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002253 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002254
2255 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002256 * Move completed and retryable entries to our local lists.
2257 * If we find a request that requires polling, break out
2258 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002259 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002260 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002261 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002262 continue;
2263 }
2264 if (!list_empty(&done))
2265 break;
2266
2267 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2268 if (ret < 0)
2269 break;
2270
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002271 /* iopoll may have completed current req */
2272 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002273 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002274
Jens Axboedef596e2019-01-09 08:59:42 -07002275 if (ret && spin)
2276 spin = false;
2277 ret = 0;
2278 }
2279
2280 if (!list_empty(&done))
2281 io_iopoll_complete(ctx, nr_events, &done);
2282
2283 return ret;
2284}
2285
2286/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08002287 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07002288 * non-spinning poll check - we'll still enter the driver poll loop, but only
2289 * as a non-spinning completion check.
2290 */
2291static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2292 long min)
2293{
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002294 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07002295 int ret;
2296
2297 ret = io_do_iopoll(ctx, nr_events, min);
2298 if (ret < 0)
2299 return ret;
Pavel Begunkoveba0a4d2020-07-06 17:59:30 +03002300 if (*nr_events >= min)
Jens Axboedef596e2019-01-09 08:59:42 -07002301 return 0;
2302 }
2303
2304 return 1;
2305}
2306
2307/*
2308 * We can't just wait for polled events to come to us, we have to actively
2309 * find and complete them.
2310 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002311static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002312{
2313 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2314 return;
2315
2316 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002317 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002318 unsigned int nr_events = 0;
2319
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002320 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002321
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002322		/* let it sleep and repeat later if we can't complete a request */
2323 if (nr_events == 0)
2324 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002325 /*
2326		 * Ensure we allow local-to-the-cpu processing to take place; in this
2327		 * case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002328		 * Also let task_work, etc., make progress by releasing the mutex.
Jens Axboe08f54392019-08-21 22:19:11 -06002329 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002330 if (need_resched()) {
2331 mutex_unlock(&ctx->uring_lock);
2332 cond_resched();
2333 mutex_lock(&ctx->uring_lock);
2334 }
Jens Axboedef596e2019-01-09 08:59:42 -07002335 }
2336 mutex_unlock(&ctx->uring_lock);
2337}
2338
Pavel Begunkov7668b922020-07-07 16:36:21 +03002339static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002340{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002341 unsigned int nr_events = 0;
Jens Axboe2b2ed972019-10-25 10:06:15 -06002342 int iters = 0, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002343
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002344 /*
2345 * We disallow the app entering submit/complete with polling, but we
2346 * still need to lock the ring to prevent racing with polled issue
2347 * that got punted to a workqueue.
2348 */
2349 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002350 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002351 /*
Jens Axboea3a0e432019-08-20 11:03:11 -06002352 * Don't enter poll loop if we already have events pending.
2353 * If we do, we can potentially be spinning for commands that
2354 * already triggered a CQE (eg in error).
2355 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00002356 if (test_bit(0, &ctx->cq_check_overflow))
2357 __io_cqring_overflow_flush(ctx, false, NULL, NULL);
2358 if (io_cqring_events(ctx))
Jens Axboea3a0e432019-08-20 11:03:11 -06002359 break;
2360
2361 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -06002362 * If a submit got punted to a workqueue, we can have the
2363 * application entering polling for a command before it gets
2364 * issued. That app will hold the uring_lock for the duration
2365 * of the poll right here, so we need to take a breather every
2366 * now and then to ensure that the issue has a chance to add
2367 * the poll to the issued list. Otherwise we can spin here
2368 * forever, while the workqueue is stuck trying to acquire the
2369 * very same mutex.
2370 */
2371 if (!(++iters & 7)) {
2372 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002373 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002374 mutex_lock(&ctx->uring_lock);
2375 }
2376
Pavel Begunkov7668b922020-07-07 16:36:21 +03002377 ret = io_iopoll_getevents(ctx, &nr_events, min);
Jens Axboedef596e2019-01-09 08:59:42 -07002378 if (ret <= 0)
2379 break;
2380 ret = 0;
Pavel Begunkov7668b922020-07-07 16:36:21 +03002381 } while (min && !nr_events && !need_resched());
Jens Axboedef596e2019-01-09 08:59:42 -07002382
Jens Axboe500f9fb2019-08-19 12:15:59 -06002383 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002384 return ret;
2385}
2386
Jens Axboe491381ce2019-10-17 09:20:46 -06002387static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002388{
Jens Axboe491381ce2019-10-17 09:20:46 -06002389 /*
2390 * Tell lockdep we inherited freeze protection from submission
2391 * thread.
2392 */
2393 if (req->flags & REQ_F_ISREG) {
2394 struct inode *inode = file_inode(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002395
Jens Axboe491381ce2019-10-17 09:20:46 -06002396 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002397 }
Jens Axboe491381ce2019-10-17 09:20:46 -06002398 file_end_write(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002399}
2400
Jens Axboeb63534c2020-06-04 11:28:00 -06002401#ifdef CONFIG_BLOCK
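/*
 * Rebuild the iovec/iter state needed to re-issue a read or write after a
 * transient failure; returns true once async data is in place.
 */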
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002402static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002403{
2404 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Colin Ian King4a245472021-02-10 20:00:07 +00002405 int rw, ret;
Jens Axboeb63534c2020-06-04 11:28:00 -06002406 struct iov_iter iter;
Jens Axboeb63534c2020-06-04 11:28:00 -06002407
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002408 /* already prepared */
2409 if (req->async_data)
2410 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002411
2412 switch (req->opcode) {
2413 case IORING_OP_READV:
2414 case IORING_OP_READ_FIXED:
2415 case IORING_OP_READ:
2416 rw = READ;
2417 break;
2418 case IORING_OP_WRITEV:
2419 case IORING_OP_WRITE_FIXED:
2420 case IORING_OP_WRITE:
2421 rw = WRITE;
2422 break;
2423 default:
2424 printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
2425 req->opcode);
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002426 return false;
Jens Axboeb63534c2020-06-04 11:28:00 -06002427 }
2428
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002429 ret = io_import_iovec(rw, req, &iovec, &iter, false);
2430 if (ret < 0)
2431 return false;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00002432 return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
Jens Axboeb63534c2020-06-04 11:28:00 -06002433}
Jens Axboeb63534c2020-06-04 11:28:00 -06002434
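/*
 * Decide whether a failed read/write on a regular file or block device may
 * be re-issued rather than completed with -EAGAIN.
 */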
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002435static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002436{
Jens Axboe355afae2020-09-02 09:30:31 -06002437 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002438 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002439
Jens Axboe355afae2020-09-02 09:30:31 -06002440 if (!S_ISBLK(mode) && !S_ISREG(mode))
2441 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002442 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2443 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002444 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002445 /*
2446 * If ref is dying, we might be running poll reap from the exit work.
2447 * Don't attempt to reissue from that path, just let it fail with
2448 * -EAGAIN.
2449 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002450 if (percpu_ref_is_dying(&ctx->refs))
2451 return false;
2452 return true;
2453}
2454#endif
2455
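/*
 * Try to re-issue a read/write that failed with -EAGAIN: take an extra
 * reference and queue it to the async workqueue. Returns false if the
 * request must complete with the error instead.
 */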
2456static bool io_rw_reissue(struct io_kiocb *req)
2457{
2458#ifdef CONFIG_BLOCK
2459 if (!io_rw_should_reissue(req))
Jens Axboe7c977a52021-02-23 19:17:35 -07002460 return false;
Jens Axboeb63534c2020-06-04 11:28:00 -06002461
Pavel Begunkov55e6ac12021-01-08 20:57:22 +00002462 lockdep_assert_held(&req->ctx->uring_lock);
2463
Jens Axboe37d1e2e2021-02-17 21:03:43 -07002464 if (io_resubmit_prep(req)) {
Jens Axboefdee9462020-08-27 16:46:24 -06002465 refcount_inc(&req->refs);
2466 io_queue_async_work(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002467 return true;
Jens Axboefdee9462020-08-27 16:46:24 -06002468 }
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002469 req_set_fail_links(req);
Jens Axboeb63534c2020-06-04 11:28:00 -06002470#endif
2471 return false;
2472}
2473
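/*
 * Common completion for reads and writes: attempt re-issue on transient
 * errors, end write freeze protection, return any selected buffer, and
 * post the CQE.
 */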
Jens Axboea1d7c392020-06-22 11:09:46 -06002474static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002475 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002476{
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002477 int cflags = 0;
2478
Pavel Begunkov23faba32021-02-11 18:28:22 +00002479 if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
2480 return;
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002481 if (res != req->result)
2482 req_set_fail_links(req);
Pavel Begunkov23faba32021-02-11 18:28:22 +00002483
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002484 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2485 kiocb_end_write(req);
2486 if (req->flags & REQ_F_BUFFER_SELECTED)
2487 cflags = io_put_rw_kbuf(req);
2488 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002489}
2490
2491static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2492{
Jens Axboe9adbd452019-12-20 08:45:55 -07002493 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002494
Pavel Begunkov889fca72021-02-10 00:03:09 +00002495 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002496}
2497
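/*
 * Completion handler for IOPOLL requests: record the result and set
 * ->iopoll_completed so the io_do_iopoll() reap loop can find it.
 */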
Jens Axboedef596e2019-01-09 08:59:42 -07002498static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2499{
Jens Axboe9adbd452019-12-20 08:45:55 -07002500 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002501
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002502#ifdef CONFIG_BLOCK
2503 /* Rewind iter, if we have one. iopoll path resubmits as usual */
2504 if (res == -EAGAIN && io_rw_should_reissue(req)) {
2505 struct io_async_rw *rw = req->async_data;
2506
2507 if (rw)
2508 iov_iter_revert(&rw->iter,
2509 req->result - iov_iter_count(&rw->iter));
2510 else if (!io_resubmit_prep(req))
2511 res = -EIO;
2512 }
2513#endif
2514
Jens Axboe491381ce2019-10-17 09:20:46 -06002515 if (kiocb->ki_flags & IOCB_WRITE)
2516 kiocb_end_write(req);
Jens Axboedef596e2019-01-09 08:59:42 -07002517
Xiaoguang Wang2d7d6792020-06-16 02:06:37 +08002518 if (res != -EAGAIN && res != req->result)
Jens Axboe4e88d6e2019-12-07 20:59:47 -07002519 req_set_fail_links(req);
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002520
2521 WRITE_ONCE(req->result, res);
2522 /* order with io_poll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002523 smp_wmb();
2524 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002525}
2526
2527/*
2528 * After the iocb has been issued, it's safe to be found on the poll list.
2529 * Adding the kiocb to the list AFTER submission ensures that we don't
2530 * find it from an io_iopoll_getevents() thread before the issuer is done
2531 * accessing the kiocb cookie.
2532 */
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002533static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
Jens Axboedef596e2019-01-09 08:59:42 -07002534{
2535 struct io_ring_ctx *ctx = req->ctx;
2536
2537 /*
2538 * Track whether we have multiple files in our lists. This will impact
2539 * how we do polling eventually, not spinning if we're on potentially
2540 * different devices.
2541 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002542 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002543 ctx->poll_multi_file = false;
2544 } else if (!ctx->poll_multi_file) {
2545 struct io_kiocb *list_req;
2546
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002547 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002548 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002549 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002550 ctx->poll_multi_file = true;
2551 }
2552
2553 /*
2554 * For fast devices, IO may have already completed. If it has, add
2555 * it to the front so we find it first.
2556 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002557 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002558 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002559 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002560 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002561
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002562 /*
2563 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
2564 * task context or in io worker task context. If current task context is
2565 * sq thread, we don't need to check whether we should wake up the sq thread.
2566 */
2567 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
Jens Axboe534ca6d2020-09-02 13:52:19 -06002568 wq_has_sleeper(&ctx->sq_data->wait))
2569 wake_up(&ctx->sq_data->wait);
Jens Axboedef596e2019-01-09 08:59:42 -07002570}
2571
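/* Drop any file references still cached in the submission state */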
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002572static inline void io_state_file_put(struct io_submit_state *state)
2573{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002574 if (state->file_refs) {
2575 fput_many(state->file, state->file_refs);
2576 state->file_refs = 0;
2577 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002578}
2579
2580/*
2581 * Get as many references to a file as we have IOs left in this submission,
2582 * assuming most submissions are for one file, or at least that each file
2583 * has more than one submission.
2584 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002585static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002586{
2587 if (!state)
2588 return fget(fd);
2589
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002590 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002591 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002592 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002593 return state->file;
2594 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002595 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002596 }
2597 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002598 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002599 return NULL;
2600
2601 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002602 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002603 return state->file;
2604}
2605
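/* True if there is no bdev or the bdev's queue supports nowait issue */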
Jens Axboe4503b762020-06-01 10:00:27 -06002606static bool io_bdev_nowait(struct block_device *bdev)
2607{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002608 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002609}
2610
Jens Axboe2b188cc2019-01-07 10:46:33 -07002611/*
2612 * If we tracked the file through the SCM inflight mechanism, we could support
2613 * any file. For now, just ensure that anything potentially problematic is done
2614 * inline.
2615 */
Jens Axboeaf197f52020-04-28 13:15:06 -06002616static bool io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002617{
2618 umode_t mode = file_inode(file)->i_mode;
2619
Jens Axboe4503b762020-06-01 10:00:27 -06002620 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002621 if (IS_ENABLED(CONFIG_BLOCK) &&
2622 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002623 return true;
2624 return false;
2625 }
2626 if (S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002627 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002628 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002629 if (IS_ENABLED(CONFIG_BLOCK) &&
2630 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002631 file->f_op != &io_uring_fops)
2632 return true;
2633 return false;
2634 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002635
Jens Axboec5b85622020-06-09 19:23:05 -06002636 /* any ->read/write should understand O_NONBLOCK */
2637 if (file->f_flags & O_NONBLOCK)
2638 return true;
2639
Jens Axboeaf197f52020-04-28 13:15:06 -06002640 if (!(file->f_mode & FMODE_NOWAIT))
2641 return false;
2642
2643 if (rw == READ)
2644 return file->f_op->read_iter != NULL;
2645
2646 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002647}
2648
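/*
 * Prepare the kiocb for a read/write from the SQE: file position, rw
 * flags, ioprio, and the IOPOLL vs regular completion handler.
 */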
Pavel Begunkova88fc402020-09-30 22:57:53 +03002649static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002650{
Jens Axboedef596e2019-01-09 08:59:42 -07002651 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002652 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002653 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002654 unsigned ioprio;
2655 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002656
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002657 if (S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002658 req->flags |= REQ_F_ISREG;
2659
Jens Axboe2b188cc2019-01-07 10:46:33 -07002660 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002661 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002662 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002663 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002664 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002665 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002666 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2667 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2668 if (unlikely(ret))
2669 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002670
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002671 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2672 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2673 req->flags |= REQ_F_NOWAIT;
2674
Jens Axboe2b188cc2019-01-07 10:46:33 -07002675 ioprio = READ_ONCE(sqe->ioprio);
2676 if (ioprio) {
2677 ret = ioprio_check_cap(ioprio);
2678 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002679 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002680
2681 kiocb->ki_ioprio = ioprio;
2682 } else
2683 kiocb->ki_ioprio = get_current_ioprio();
2684
Jens Axboedef596e2019-01-09 08:59:42 -07002685 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002686 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2687 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002688 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002689
Jens Axboedef596e2019-01-09 08:59:42 -07002690 kiocb->ki_flags |= IOCB_HIPRI;
2691 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002692 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002693 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002694 if (kiocb->ki_flags & IOCB_HIPRI)
2695 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002696 kiocb->ki_complete = io_complete_rw;
2697 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002698
Jens Axboe3529d8c2019-12-19 18:24:38 -07002699 req->rw.addr = READ_ONCE(sqe->addr);
2700 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002701 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002702 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002703}
2704
2705static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2706{
2707 switch (ret) {
2708 case -EIOCBQUEUED:
2709 break;
2710 case -ERESTARTSYS:
2711 case -ERESTARTNOINTR:
2712 case -ERESTARTNOHAND:
2713 case -ERESTART_RESTARTBLOCK:
2714 /*
2715 * We can't just restart the syscall, since previously
2716 * submitted sqes may already be in progress. Just fail this
2717 * IO with EINTR.
2718 */
2719 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002720 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002721 default:
2722 kiocb->ki_complete(kiocb, ret, 0);
2723 }
2724}
2725
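/*
 * Finish a read/write: fold in bytes completed by a previous partial
 * attempt, update the file position for REQ_F_CUR_POS, then complete the
 * request or translate the error via io_rw_done().
 */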
Jens Axboea1d7c392020-06-22 11:09:46 -06002726static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002727 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002728{
Jens Axboeba042912019-12-25 16:33:42 -07002729 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002730 struct io_async_rw *io = req->async_data;
Jens Axboeba042912019-12-25 16:33:42 -07002731
Jens Axboe227c0c92020-08-13 11:51:40 -06002732 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002733 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002734 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002735 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002736 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002737 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002738 }
2739
Jens Axboeba042912019-12-25 16:33:42 -07002740 if (req->flags & REQ_F_CUR_POS)
2741 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002742 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002743 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002744 else
2745 io_rw_done(kiocb, ret);
2746}
2747
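/*
 * Set up a bvec iterator over a registered (fixed) buffer, after checking
 * that the requested range lies entirely within the mapped region.
 */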
Pavel Begunkov847595d2021-02-04 13:52:06 +00002748static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07002749{
Jens Axboe9adbd452019-12-20 08:45:55 -07002750 struct io_ring_ctx *ctx = req->ctx;
2751 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002752 struct io_mapped_ubuf *imu;
Pavel Begunkov4be1c612020-09-06 00:45:48 +03002753 u16 index, buf_index = req->buf_index;
Jens Axboeedafcce2019-01-09 09:16:05 -07002754 size_t offset;
2755 u64 buf_addr;
2756
Jens Axboeedafcce2019-01-09 09:16:05 -07002757 if (unlikely(buf_index >= ctx->nr_user_bufs))
2758 return -EFAULT;
Jens Axboeedafcce2019-01-09 09:16:05 -07002759 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2760 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07002761 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002762
2763 /* overflow */
2764 if (buf_addr + len < buf_addr)
2765 return -EFAULT;
2766 /* not inside the mapped region */
2767 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2768 return -EFAULT;
2769
2770 /*
2771 * May not be at the start of the buffer; set the size appropriately
2772 * and advance to the beginning.
2773 */
2774 offset = buf_addr - imu->ubuf;
2775 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002776
2777 if (offset) {
2778 /*
2779 * Don't use iov_iter_advance() here, as it's really slow for
2780 * using the latter parts of a big fixed buffer - it iterates
2781 * over each segment manually. We can cheat a bit here, because
2782 * we know that:
2783 *
2784 * 1) it's a BVEC iter, we set it up
2785 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2786 * first and last bvec
2787 *
2788 * So just find our index, and adjust the iterator afterwards.
2789 * If the offset is within the first bvec (or the whole first
2790 * bvec, just use iov_iter_advance(). This makes it easier
2791 * since we can just skip the first segment, which may not
2792 * be PAGE_SIZE aligned.
2793 */
2794 const struct bio_vec *bvec = imu->bvec;
2795
2796 if (offset <= bvec->bv_len) {
2797 iov_iter_advance(iter, offset);
2798 } else {
2799 unsigned long seg_skip;
2800
2801 /* skip first vec */
2802 offset -= bvec->bv_len;
2803 seg_skip = 1 + (offset >> PAGE_SHIFT);
2804
2805 iter->bvec = bvec + seg_skip;
2806 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002807 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002808 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002809 }
2810 }
2811
Pavel Begunkov847595d2021-02-04 13:52:06 +00002812 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002813}
2814
Jens Axboebcda7ba2020-02-23 16:42:51 -07002815static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2816{
2817 if (needs_lock)
2818 mutex_unlock(&ctx->uring_lock);
2819}
2820
2821static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2822{
2823 /*
2824 * "Normal" inline submissions always hold the uring_lock, since we
2825 * grab it from the system call. Same is true for the SQPOLL offload.
2826 * The only exception is when we've detached the request and issue it
2827 * from an async worker thread, grab the lock for that case.
2828 */
2829 if (needs_lock)
2830 mutex_lock(&ctx->uring_lock);
2831}
2832
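/*
 * Select a buffer from the provided-buffer group 'bgid', trimming *len to
 * the buffer's size; returns ERR_PTR(-ENOBUFS) if the group is empty.
 */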
2833static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2834 int bgid, struct io_buffer *kbuf,
2835 bool needs_lock)
2836{
2837 struct io_buffer *head;
2838
2839 if (req->flags & REQ_F_BUFFER_SELECTED)
2840 return kbuf;
2841
2842 io_ring_submit_lock(req->ctx, needs_lock);
2843
2844 lockdep_assert_held(&req->ctx->uring_lock);
2845
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002846 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002847 if (head) {
2848 if (!list_empty(&head->list)) {
2849 kbuf = list_last_entry(&head->list, struct io_buffer,
2850 list);
2851 list_del(&kbuf->list);
2852 } else {
2853 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002854 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002855 }
2856 if (*len > kbuf->len)
2857 *len = kbuf->len;
2858 } else {
2859 kbuf = ERR_PTR(-ENOBUFS);
2860 }
2861
2862 io_ring_submit_unlock(req->ctx, needs_lock);
2863
2864 return kbuf;
2865}
2866
Jens Axboe4d954c22020-02-27 07:31:19 -07002867static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2868 bool needs_lock)
2869{
2870 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002871 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002872
2873 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002874 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002875 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2876 if (IS_ERR(kbuf))
2877 return kbuf;
2878 req->rw.addr = (u64) (unsigned long) kbuf;
2879 req->flags |= REQ_F_BUFFER_SELECTED;
2880 return u64_to_user_ptr(kbuf->addr);
2881}
2882
2883#ifdef CONFIG_COMPAT
2884static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2885 bool needs_lock)
2886{
2887 struct compat_iovec __user *uiov;
2888 compat_ssize_t clen;
2889 void __user *buf;
2890 ssize_t len;
2891
2892 uiov = u64_to_user_ptr(req->rw.addr);
2893 if (!access_ok(uiov, sizeof(*uiov)))
2894 return -EFAULT;
2895 if (__get_user(clen, &uiov->iov_len))
2896 return -EFAULT;
2897 if (clen < 0)
2898 return -EINVAL;
2899
2900 len = clen;
2901 buf = io_rw_buffer_select(req, &len, needs_lock);
2902 if (IS_ERR(buf))
2903 return PTR_ERR(buf);
2904 iov[0].iov_base = buf;
2905 iov[0].iov_len = (compat_size_t) len;
2906 return 0;
2907}
2908#endif
2909
2910static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2911 bool needs_lock)
2912{
2913 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2914 void __user *buf;
2915 ssize_t len;
2916
2917 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2918 return -EFAULT;
2919
2920 len = iov[0].iov_len;
2921 if (len < 0)
2922 return -EINVAL;
2923 buf = io_rw_buffer_select(req, &len, needs_lock);
2924 if (IS_ERR(buf))
2925 return PTR_ERR(buf);
2926 iov[0].iov_base = buf;
2927 iov[0].iov_len = len;
2928 return 0;
2929}
2930
2931static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2932 bool needs_lock)
2933{
Jens Axboedddb3e22020-06-04 11:27:01 -06002934 if (req->flags & REQ_F_BUFFER_SELECTED) {
2935 struct io_buffer *kbuf;
2936
2937 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2938 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2939 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07002940 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06002941 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00002942 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07002943 return -EINVAL;
2944
2945#ifdef CONFIG_COMPAT
2946 if (req->ctx->compat)
2947 return io_compat_import(req, iov, needs_lock);
2948#endif
2949
2950 return __io_iov_buffer_select(req, iov, needs_lock);
2951}
2952
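/*
 * Build an iov_iter from the SQE's buffer description: a registered fixed
 * buffer, a provided buffer, a plain addr/len pair, or a user iovec array.
 */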
Pavel Begunkov847595d2021-02-04 13:52:06 +00002953static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
2954 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002955{
Jens Axboe9adbd452019-12-20 08:45:55 -07002956 void __user *buf = u64_to_user_ptr(req->rw.addr);
2957 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00002958 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07002959 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07002960
Pavel Begunkov7d009162019-11-25 23:14:40 +03002961 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002962 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07002963 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07002964 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002965
Jens Axboebcda7ba2020-02-23 16:42:51 -07002966 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002967 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07002968 return -EINVAL;
2969
Jens Axboe3a6820f2019-12-22 15:19:35 -07002970 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002971 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07002972 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03002973 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07002974 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06002975 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002976 }
2977
Jens Axboe3a6820f2019-12-22 15:19:35 -07002978 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2979 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00002980 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07002981 }
2982
Jens Axboe4d954c22020-02-27 07:31:19 -07002983 if (req->flags & REQ_F_BUFFER_SELECT) {
2984 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00002985 if (!ret)
2986 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07002987 *iovec = NULL;
2988 return ret;
2989 }
2990
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02002991 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
2992 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002993}
2994
Jens Axboe0fef9482020-08-26 10:36:20 -06002995static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
2996{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03002997 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06002998}
2999
Jens Axboe32960612019-09-23 11:05:34 -06003000/*
3001 * For files that don't have ->read_iter() and ->write_iter(), handle them
3002 * by looping over ->read() or ->write() manually.
3003 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003004static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003005{
Jens Axboe4017eb92020-10-22 14:14:12 -06003006 struct kiocb *kiocb = &req->rw.kiocb;
3007 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003008 ssize_t ret = 0;
3009
3010 /*
3011 * Don't support polled IO through this interface, and we can't
3012 * support non-blocking either. For the latter, this just causes
3013 * the kiocb to be handled from an async context.
3014 */
3015 if (kiocb->ki_flags & IOCB_HIPRI)
3016 return -EOPNOTSUPP;
3017 if (kiocb->ki_flags & IOCB_NOWAIT)
3018 return -EAGAIN;
3019
3020 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003021 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003022 ssize_t nr;
3023
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003024 if (!iov_iter_is_bvec(iter)) {
3025 iovec = iov_iter_iovec(iter);
3026 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003027 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3028 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003029 }
3030
Jens Axboe32960612019-09-23 11:05:34 -06003031 if (rw == READ) {
3032 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003033 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003034 } else {
3035 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003036 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003037 }
3038
3039 if (nr < 0) {
3040 if (!ret)
3041 ret = nr;
3042 break;
3043 }
3044 ret += nr;
3045 if (nr != iovec.iov_len)
3046 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003047 req->rw.len -= nr;
3048 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003049 iov_iter_advance(iter, nr);
3050 }
3051
3052 return ret;
3053}
3054
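/*
 * Copy the current iterator (and any allocated iovec) into the request's
 * async data so the read/write can be retried later.
 */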
Jens Axboeff6165b2020-08-13 09:47:43 -06003055static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3056 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003057{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003058 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003059
Jens Axboeff6165b2020-08-13 09:47:43 -06003060 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003061 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003062 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003063 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003064 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003065 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003066 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003067 unsigned iov_off = 0;
3068
3069 rw->iter.iov = rw->fast_iov;
3070 if (iter->iov != fast_iov) {
3071 iov_off = iter->iov - fast_iov;
3072 rw->iter.iov += iov_off;
3073 }
3074 if (rw->fast_iov != fast_iov)
3075 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003076 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003077 } else {
3078 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003079 }
3080}
3081
Jens Axboee8c2bc12020-08-15 18:44:09 -07003082static inline int __io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003083{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003084 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3085 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3086 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003087}
3088
Jens Axboee8c2bc12020-08-15 18:44:09 -07003089static int io_alloc_async_data(struct io_kiocb *req)
Jens Axboef67676d2019-12-02 11:03:47 -07003090{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003091 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboed3656342019-12-18 09:50:26 -07003092 return 0;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003093
Jens Axboee8c2bc12020-08-15 18:44:09 -07003094 return __io_alloc_async_data(req);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003095}
3096
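/*
 * Unless the opcode never needs async data (and 'force' is not set),
 * allocate it and snapshot the iterator state for a later retry.
 */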
Jens Axboeff6165b2020-08-13 09:47:43 -06003097static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3098 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003099 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003100{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003101 if (!force && !io_op_defs[req->opcode].needs_async_data)
Jens Axboe74566df2020-01-13 19:23:24 -07003102 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003103 if (!req->async_data) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003104 if (__io_alloc_async_data(req)) {
3105 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003106 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003107 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003108
Jens Axboeff6165b2020-08-13 09:47:43 -06003109 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003110 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003111 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003112}
3113
Pavel Begunkov73debe62020-09-30 22:57:54 +03003114static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003115{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003116 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003117 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003118 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003119
Pavel Begunkov2846c482020-11-07 13:16:27 +00003120 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003121 if (unlikely(ret < 0))
3122 return ret;
3123
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003124 iorw->bytes_done = 0;
3125 iorw->free_iovec = iov;
3126 if (iov)
3127 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003128 return 0;
3129}
3130
Pavel Begunkov73debe62020-09-30 22:57:54 +03003131static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003132{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003133 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3134 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003135 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003136}
3137
Jens Axboec1dd91d2020-08-03 16:43:59 -06003138/*
3139 * This is our waitqueue callback handler, registered through lock_page_async()
3140 * when we initially tried to do the IO with our waitqueue armed in the iocb.
3141 * This gets called when the page is unlocked, and we generally expect that to
3142 * happen when the page IO is completed and the page is now uptodate. This will
3143 * queue a task_work based retry of the operation, attempting to copy the data
3144 * again. If the latter fails because the page was NOT uptodate, then we will
3145 * do a thread based blocking retry of the operation. That's the unexpected
3146 * slow path.
3147 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003148static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3149 int sync, void *arg)
3150{
3151 struct wait_page_queue *wpq;
3152 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003153 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003154
3155 wpq = container_of(wait, struct wait_page_queue, wait);
3156
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003157 if (!wake_page_match(wpq, key))
3158 return 0;
3159
Hao Xuc8d317a2020-09-29 20:00:45 +08003160 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003161 list_del_init(&wait->entry);
3162
Jens Axboebcf5a062020-05-22 09:24:42 -06003163 /* submit ref gets dropped, acquire a new one */
3164 refcount_inc(&req->refs);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003165 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003166 return 1;
3167}
3168
Jens Axboec1dd91d2020-08-03 16:43:59 -06003169/*
3170 * This controls whether a given IO request should be armed for async page
3171 * based retry. If we return false here, the request is handed to the async
3172 * worker threads for retry. If we're doing buffered reads on a regular file,
3173 * we prepare a private wait_page_queue entry and retry the operation. This
3174 * will either succeed because the page is now uptodate and unlocked, or it
3175 * will register a callback when the page is unlocked at IO completion. Through
3176 * that callback, io_uring uses task_work to setup a retry of the operation.
3177 * That retry will attempt the buffered read again. The retry will generally
3178 * succeed, or in rare cases where it fails, we then fall back to using the
3179 * async worker threads for a blocking retry.
3180 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003181static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003182{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003183 struct io_async_rw *rw = req->async_data;
3184 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003185 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003186
3187 /* never retry for NOWAIT, we just complete with -EAGAIN */
3188 if (req->flags & REQ_F_NOWAIT)
3189 return false;
3190
Jens Axboe227c0c92020-08-13 11:51:40 -06003191 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003192 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003193 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003194
Jens Axboebcf5a062020-05-22 09:24:42 -06003195 /*
3196 * just use poll if we can, and don't attempt if the fs doesn't
3197 * support callback based unlocks
3198 */
3199 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3200 return false;
3201
Jens Axboe3b2a4432020-08-16 10:58:43 -07003202 wait->wait.func = io_async_buf_func;
3203 wait->wait.private = req;
3204 wait->wait.flags = 0;
3205 INIT_LIST_HEAD(&wait->wait.entry);
3206 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003207 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003208 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003209 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003210}
3211
3212static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3213{
3214 if (req->file->f_op->read_iter)
3215 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003216 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003217 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003218 else
3219 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003220}
3221
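/*
 * Issue a read. If the nonblocking attempt returns -EAGAIN, either hand the
 * request back for async punting or, for buffered reads that support it,
 * arm a page-unlock callback and retry here until the range is complete.
 */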
Pavel Begunkov889fca72021-02-10 00:03:09 +00003222static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003223{
3224 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003225 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003226 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003227 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003228 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003229 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003230
Pavel Begunkov2846c482020-11-07 13:16:27 +00003231 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003232 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003233 iovec = NULL;
3234 } else {
3235 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3236 if (ret < 0)
3237 return ret;
3238 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003239 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003240 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003241
Jens Axboefd6c2e42019-12-18 12:19:41 -07003242 /* Ensure we clear previously set non-block flag */
3243 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003244 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003245 else
3246 kiocb->ki_flags |= IOCB_NOWAIT;
3247
Pavel Begunkov24c74672020-06-21 13:09:51 +03003248 /* If the file doesn't support async, just async punt */
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003249 if (force_nonblock && !io_file_supports_async(req->file, READ)) {
3250 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003251 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003252 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003253
Pavel Begunkov632546c2020-11-07 13:16:26 +00003254 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003255 if (unlikely(ret)) {
3256 kfree(iovec);
3257 return ret;
3258 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003259
Jens Axboe227c0c92020-08-13 11:51:40 -06003260 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003261
Pavel Begunkov57cd6572021-02-01 18:59:56 +00003262 if (ret == -EIOCBQUEUED) {
Jens Axboe3e6a0d32021-03-01 13:56:00 -07003263 if (req->async_data)
3264 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003265 goto out_free;
Jens Axboe227c0c92020-08-13 11:51:40 -06003266 } else if (ret == -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003267 /* IOPOLL retry should happen for io-wq threads */
3268 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003269 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003270 /* no retry on NONBLOCK nor RWF_NOWAIT */
3271 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003272 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003273 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003274 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003275 ret = 0;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003276 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003277 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003278 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003279 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003280 }
3281
Jens Axboe227c0c92020-08-13 11:51:40 -06003282 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003283 if (ret2)
3284 return ret2;
3285
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003286 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003287 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003288 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003289 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003290
Pavel Begunkovb23df912021-02-04 13:52:04 +00003291 do {
3292 io_size -= ret;
3293 rw->bytes_done += ret;
3294 /* if we can retry, do so with the callbacks armed */
3295 if (!io_rw_should_retry(req)) {
3296 kiocb->ki_flags &= ~IOCB_WAITQ;
3297 return -EAGAIN;
3298 }
3299
3300 /*
3301 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3302 * we get -EIOCBQUEUED, then we'll get a notification when the
3303 * desired page gets unlocked. We can also get a partial read
3304 * here, and if we do, then just retry at the new offset.
3305 */
3306 ret = io_iter_do_read(req, iter);
3307 if (ret == -EIOCBQUEUED)
3308 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003309 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003310 kiocb->ki_flags &= ~IOCB_WAITQ;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003311 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003312done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003313 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003314out_free:
3315 /* it's faster to check here than to delegate to kfree() */
3316 if (iovec)
3317 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003318 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003319}
3320
Pavel Begunkov73debe62020-09-30 22:57:54 +03003321static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003322{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003323 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3324 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003325 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003326}
3327
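/*
 * Issue a write. Attempts that can't proceed without blocking save the
 * iterator state (copy_iov) and return -EAGAIN so the request is punted
 * to the async workqueue.
 */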
Pavel Begunkov889fca72021-02-10 00:03:09 +00003328static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003329{
3330 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003331 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003332 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003333 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003334 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003335 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003336
Pavel Begunkov2846c482020-11-07 13:16:27 +00003337 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003338 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003339 iovec = NULL;
3340 } else {
3341 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3342 if (ret < 0)
3343 return ret;
3344 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003345 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003346 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003347
Jens Axboefd6c2e42019-12-18 12:19:41 -07003348 /* Ensure we clear previously set non-block flag */
3349 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003350 kiocb->ki_flags &= ~IOCB_NOWAIT;
3351 else
3352 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003353
Pavel Begunkov24c74672020-06-21 13:09:51 +03003354 /* If the file doesn't support async, just async punt */
Jens Axboeaf197f52020-04-28 13:15:06 -06003355 if (force_nonblock && !io_file_supports_async(req->file, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003356 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003357
Jens Axboe10d59342019-12-09 20:16:22 -07003358 /* file path doesn't support NOWAIT for non-direct_IO */
3359 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3360 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003361 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003362
Pavel Begunkov632546c2020-11-07 13:16:26 +00003363 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003364 if (unlikely(ret))
3365 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003366
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003367 /*
3368 * Open-code file_start_write here to grab freeze protection,
3369 * which will be released by another thread in
3370 * io_complete_rw(). Fool lockdep by telling it the lock got
3371 * released so that it doesn't complain about the held lock when
3372 * we return to userspace.
3373 */
3374 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003375 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003376 __sb_writers_release(file_inode(req->file)->i_sb,
3377 SB_FREEZE_WRITE);
3378 }
3379 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003380
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003381 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003382 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003383 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003384 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003385 else
3386 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003387
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003388 /*
3389 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3390 * retry them without IOCB_NOWAIT.
3391 */
3392 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3393 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003394 /* no retry on NONBLOCK nor RWF_NOWAIT */
3395 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003396 goto done;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07003397 if (ret2 == -EIOCBQUEUED && req->async_data)
3398 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003399 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003400 /* IOPOLL retry should happen for io-wq threads */
3401 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3402 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003403done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003404 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003405 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003406copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003407 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003408 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003409 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003410 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003411 }
Jens Axboe31b51512019-01-18 22:56:34 -07003412out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003413 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003414 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003415 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003416 return ret;
3417}
3418
Jens Axboe80a261f2020-09-28 14:23:58 -06003419static int io_renameat_prep(struct io_kiocb *req,
3420 const struct io_uring_sqe *sqe)
3421{
3422 struct io_rename *ren = &req->rename;
3423 const char __user *oldf, *newf;
3424
3425 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3426 return -EBADF;
3427
3428 ren->old_dfd = READ_ONCE(sqe->fd);
3429 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3430 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3431 ren->new_dfd = READ_ONCE(sqe->len);
3432 ren->flags = READ_ONCE(sqe->rename_flags);
3433
3434 ren->oldpath = getname(oldf);
3435 if (IS_ERR(ren->oldpath))
3436 return PTR_ERR(ren->oldpath);
3437
3438 ren->newpath = getname(newf);
3439 if (IS_ERR(ren->newpath)) {
3440 putname(ren->oldpath);
3441 return PTR_ERR(ren->newpath);
3442 }
3443
3444 req->flags |= REQ_F_NEED_CLEANUP;
3445 return 0;
3446}
3447
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003448static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003449{
3450 struct io_rename *ren = &req->rename;
3451 int ret;
3452
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003453 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003454 return -EAGAIN;
3455
3456 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3457 ren->newpath, ren->flags);
3458
3459 req->flags &= ~REQ_F_NEED_CLEANUP;
3460 if (ret < 0)
3461 req_set_fail_links(req);
3462 io_req_complete(req, ret);
3463 return 0;
3464}
3465
Jens Axboe14a11432020-09-28 14:27:37 -06003466static int io_unlinkat_prep(struct io_kiocb *req,
3467 const struct io_uring_sqe *sqe)
3468{
3469 struct io_unlink *un = &req->unlink;
3470 const char __user *fname;
3471
3472 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3473 return -EBADF;
3474
3475 un->dfd = READ_ONCE(sqe->fd);
3476
3477 un->flags = READ_ONCE(sqe->unlink_flags);
3478 if (un->flags & ~AT_REMOVEDIR)
3479 return -EINVAL;
3480
3481 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3482 un->filename = getname(fname);
3483 if (IS_ERR(un->filename))
3484 return PTR_ERR(un->filename);
3485
3486 req->flags |= REQ_F_NEED_CLEANUP;
3487 return 0;
3488}
3489
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003490static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003491{
3492 struct io_unlink *un = &req->unlink;
3493 int ret;
3494
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003495 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003496 return -EAGAIN;
3497
3498 if (un->flags & AT_REMOVEDIR)
3499 ret = do_rmdir(un->dfd, un->filename);
3500 else
3501 ret = do_unlinkat(un->dfd, un->filename);
3502
3503 req->flags &= ~REQ_F_NEED_CLEANUP;
3504 if (ret < 0)
3505 req_set_fail_links(req);
3506 io_req_complete(req, ret);
3507 return 0;
3508}
3509
Jens Axboe36f4fa62020-09-05 11:14:22 -06003510static int io_shutdown_prep(struct io_kiocb *req,
3511 const struct io_uring_sqe *sqe)
3512{
3513#if defined(CONFIG_NET)
3514 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3515 return -EINVAL;
3516 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3517 sqe->buf_index)
3518 return -EINVAL;
3519
3520 req->shutdown.how = READ_ONCE(sqe->len);
3521 return 0;
3522#else
3523 return -EOPNOTSUPP;
3524#endif
3525}
3526
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003527static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003528{
3529#if defined(CONFIG_NET)
3530 struct socket *sock;
3531 int ret;
3532
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003533 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003534 return -EAGAIN;
3535
Linus Torvalds48aba792020-12-16 12:44:05 -08003536 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003537 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003538 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003539
3540 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003541 if (ret < 0)
3542 req_set_fail_links(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003543 io_req_complete(req, ret);
3544 return 0;
3545#else
3546 return -EOPNOTSUPP;
3547#endif
3548}
3549
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003550static int __io_splice_prep(struct io_kiocb *req,
3551 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003552{
3553 struct io_splice* sp = &req->splice;
3554 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003555
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003556 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3557 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003558
3559 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003560 sp->len = READ_ONCE(sqe->len);
3561 sp->flags = READ_ONCE(sqe->splice_flags);
3562
3563 if (unlikely(sp->flags & ~valid_flags))
3564 return -EINVAL;
3565
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003566 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3567 (sp->flags & SPLICE_F_FD_IN_FIXED));
3568 if (!sp->file_in)
3569 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003570 req->flags |= REQ_F_NEED_CLEANUP;
3571
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003572 if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3573 /*
3574	 * Splice operations will be punted async, and here we need to
3575	 * modify io_wq_work.flags, so initialize io_wq_work first.
3576 */
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003577 req->work.flags |= IO_WQ_WORK_UNBOUND;
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08003578 }
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003579
3580 return 0;
3581}
3582
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003583static int io_tee_prep(struct io_kiocb *req,
3584 const struct io_uring_sqe *sqe)
3585{
3586 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3587 return -EINVAL;
3588 return __io_splice_prep(req, sqe);
3589}
3590
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003591static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003592{
3593 struct io_splice *sp = &req->splice;
3594 struct file *in = sp->file_in;
3595 struct file *out = sp->file_out;
3596 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3597 long ret = 0;
3598
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003599 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003600 return -EAGAIN;
3601 if (sp->len)
3602 ret = do_tee(in, out, sp->len, flags);
3603
3604 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3605 req->flags &= ~REQ_F_NEED_CLEANUP;
3606
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003607 if (ret != sp->len)
3608 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003609 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003610 return 0;
3611}
3612
3613static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3614{
3615	struct io_splice *sp = &req->splice;
3616
3617 sp->off_in = READ_ONCE(sqe->splice_off_in);
3618 sp->off_out = READ_ONCE(sqe->off);
3619 return __io_splice_prep(req, sqe);
3620}
3621
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003622static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003623{
3624 struct io_splice *sp = &req->splice;
3625 struct file *in = sp->file_in;
3626 struct file *out = sp->file_out;
3627 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3628 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003629 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003630
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003631 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003632 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003633
3634 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3635 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003636
Jens Axboe948a7742020-05-17 14:21:38 -06003637 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003638 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003639
3640 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3641 req->flags &= ~REQ_F_NEED_CLEANUP;
3642
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003643 if (ret != sp->len)
3644 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003645 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003646 return 0;
3647}
3648
Jens Axboe2b188cc2019-01-07 10:46:33 -07003649/*
3650 * IORING_OP_NOP just posts a completion event, nothing else.
3651 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003652static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003653{
3654 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003655
Jens Axboedef596e2019-01-09 08:59:42 -07003656 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3657 return -EINVAL;
3658
Pavel Begunkov889fca72021-02-10 00:03:09 +00003659 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003660 return 0;
3661}
3662
Pavel Begunkov1155c762021-02-18 18:29:38 +00003663static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003664{
Jens Axboe6b063142019-01-10 22:13:58 -07003665 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003666
Jens Axboe09bb8392019-03-13 12:39:28 -06003667 if (!req->file)
3668 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003669
Jens Axboe6b063142019-01-10 22:13:58 -07003670 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003671 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003672 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003673 return -EINVAL;
3674
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003675 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3676 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3677 return -EINVAL;
3678
3679 req->sync.off = READ_ONCE(sqe->off);
3680 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003681 return 0;
3682}
3683
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003684static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003685{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003686 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003687 int ret;
3688
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003689 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003690 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003691 return -EAGAIN;
3692
Jens Axboe9adbd452019-12-20 08:45:55 -07003693 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003694 end > 0 ? end : LLONG_MAX,
3695 req->sync.flags & IORING_FSYNC_DATASYNC);
3696 if (ret < 0)
3697 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003698 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003699 return 0;
3700}
3701
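/*
 * IORING_OP_FALLOCATE: note the SQE field mapping is not the obvious
 * one: sqe->off is the offset, sqe->addr the length and sqe->len the
 * fallocate mode. The operation always runs from a blocking context.
 */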
Jens Axboed63d1b52019-12-10 10:38:56 -07003702static int io_fallocate_prep(struct io_kiocb *req,
3703 const struct io_uring_sqe *sqe)
3704{
3705 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3706 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003707 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3708 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003709
3710 req->sync.off = READ_ONCE(sqe->off);
3711 req->sync.len = READ_ONCE(sqe->addr);
3712 req->sync.mode = READ_ONCE(sqe->len);
3713 return 0;
3714}
3715
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003716static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003717{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003718 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003719
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003720	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003721 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003722 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003723 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3724 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003725 if (ret < 0)
3726 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003727 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003728 return 0;
3729}
3730
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003731static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003732{
Jens Axboef8748882020-01-08 17:47:02 -07003733 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003734 int ret;
3735
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003736 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003737 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003738 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003739 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003740
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003741	/* open.how should already be initialised */
3742 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003743 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003744
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003745 req->open.dfd = READ_ONCE(sqe->fd);
3746 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003747 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003748 if (IS_ERR(req->open.filename)) {
3749 ret = PTR_ERR(req->open.filename);
3750 req->open.filename = NULL;
3751 return ret;
3752 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003753 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003754 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003755 return 0;
3756}
3757
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003758static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3759{
3760 u64 flags, mode;
3761
Jens Axboe14587a462020-09-05 11:36:08 -06003762 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003763 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003764 mode = READ_ONCE(sqe->len);
3765 flags = READ_ONCE(sqe->open_flags);
3766 req->open.how = build_open_how(flags, mode);
3767 return __io_openat_prep(req, sqe);
3768}
3769
Jens Axboecebdb982020-01-08 17:59:24 -07003770static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3771{
3772 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003773 size_t len;
3774 int ret;
3775
Jens Axboe14587a462020-09-05 11:36:08 -06003776 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003777 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003778 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3779 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003780 if (len < OPEN_HOW_SIZE_VER0)
3781 return -EINVAL;
3782
3783 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3784 len);
3785 if (ret)
3786 return ret;
3787
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003788 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003789}
3790
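/*
 * Issue the open. In nonblocking context only a cached (dcache-only)
 * lookup is attempted via LOOKUP_CACHED, and open flags that always
 * need to block (O_TRUNC, O_CREAT, O_TMPFILE) are punted straight away
 * with -EAGAIN. If the application itself requested RESOLVE_CACHED, a
 * failed cached lookup is returned as-is instead of being retried.
 */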
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003791static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003792{
3793 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003794 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003795 bool nonblock_set;
3796 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003797 int ret;
3798
Jens Axboecebdb982020-01-08 17:59:24 -07003799 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003800 if (ret)
3801 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003802 nonblock_set = op.open_flag & O_NONBLOCK;
3803 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003804 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003805 /*
3806 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3807		 * it'll always return -EAGAIN
3808 */
3809 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3810 return -EAGAIN;
3811 op.lookup_flags |= LOOKUP_CACHED;
3812 op.open_flag |= O_NONBLOCK;
3813 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003814
Jens Axboe4022e7a2020-03-19 19:23:18 -06003815 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003816 if (ret < 0)
3817 goto err;
3818
3819 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Jens Axboe3a81fd02020-12-10 12:25:36 -07003820 /* only retry if RESOLVE_CACHED wasn't already set by application */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003821 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
3822 file == ERR_PTR(-EAGAIN)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003823 /*
3824 * We could hang on to this 'fd', but seems like marginal
3825 * gain for something that is now known to be a slower path.
3826 * So just put it, and we'll get a new one when we retry.
3827 */
3828 put_unused_fd(ret);
3829 return -EAGAIN;
3830 }
3831
Jens Axboe15b71ab2019-12-11 11:20:36 -07003832 if (IS_ERR(file)) {
3833 put_unused_fd(ret);
3834 ret = PTR_ERR(file);
3835 } else {
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003836 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
Jens Axboe3a81fd02020-12-10 12:25:36 -07003837 file->f_flags &= ~O_NONBLOCK;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003838 fsnotify_open(file);
3839 fd_install(ret, file);
3840 }
3841err:
3842 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003843 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003844 if (ret < 0)
3845 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003846 io_req_complete(req, ret);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003847 return 0;
3848}
3849
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003850static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003851{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003852 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003853}
3854
Jens Axboe067524e2020-03-02 16:32:28 -07003855static int io_remove_buffers_prep(struct io_kiocb *req,
3856 const struct io_uring_sqe *sqe)
3857{
3858 struct io_provide_buf *p = &req->pbuf;
3859 u64 tmp;
3860
3861 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3862 return -EINVAL;
3863
3864 tmp = READ_ONCE(sqe->fd);
3865 if (!tmp || tmp > USHRT_MAX)
3866 return -EINVAL;
3867
3868 memset(p, 0, sizeof(*p));
3869 p->nbufs = tmp;
3870 p->bgid = READ_ONCE(sqe->buf_group);
3871 return 0;
3872}
3873
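/*
 * Free up to 'nbufs' buffers from the group whose head is 'buf'. The
 * head entry doubles as the list anchor and is freed last, at which
 * point the group id is erased from the io_buffers xarray. Returns the
 * number of buffers freed.
 */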
3874static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3875 int bgid, unsigned nbufs)
3876{
3877 unsigned i = 0;
3878
3879 /* shouldn't happen */
3880 if (!nbufs)
3881 return 0;
3882
3883 /* the head kbuf is the list itself */
3884 while (!list_empty(&buf->list)) {
3885 struct io_buffer *nxt;
3886
3887 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3888 list_del(&nxt->list);
3889 kfree(nxt);
3890 if (++i == nbufs)
3891 return i;
3892 }
3893 i++;
3894 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003895 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003896
3897 return i;
3898}
3899
Pavel Begunkov889fca72021-02-10 00:03:09 +00003900static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003901{
3902 struct io_provide_buf *p = &req->pbuf;
3903 struct io_ring_ctx *ctx = req->ctx;
3904 struct io_buffer *head;
3905 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003906 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003907
3908 io_ring_submit_lock(ctx, !force_nonblock);
3909
3910 lockdep_assert_held(&ctx->uring_lock);
3911
3912 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003913 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003914 if (head)
3915 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003916 if (ret < 0)
3917 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003918
3919 /* need to hold the lock to complete IOPOLL requests */
3920 if (ctx->flags & IORING_SETUP_IOPOLL) {
Pavel Begunkov889fca72021-02-10 00:03:09 +00003921 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003922 io_ring_submit_unlock(ctx, !force_nonblock);
3923 } else {
3924 io_ring_submit_unlock(ctx, !force_nonblock);
Pavel Begunkov889fca72021-02-10 00:03:09 +00003925 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003926 }
Jens Axboe067524e2020-03-02 16:32:28 -07003927 return 0;
3928}
3929
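/*
 * IORING_OP_PROVIDE_BUFFERS SQE layout, as consumed below: sqe->fd is
 * the number of buffers, sqe->addr the base of a contiguous user
 * address range, sqe->len the size of each buffer, sqe->buf_group the
 * group id and sqe->off the id of the first buffer. For example,
 * providing 8 buffers of 4096 bytes to group 1 means fd=8, len=4096,
 * buf_group=1 and addr pointing at a 32KiB region. liburing exposes a
 * prep helper (io_uring_prep_provide_buffers()) that fills these
 * fields; see liburing for its exact signature.
 */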
Jens Axboeddf0322d2020-02-23 16:41:33 -07003930static int io_provide_buffers_prep(struct io_kiocb *req,
3931 const struct io_uring_sqe *sqe)
3932{
3933 struct io_provide_buf *p = &req->pbuf;
3934 u64 tmp;
3935
3936 if (sqe->ioprio || sqe->rw_flags)
3937 return -EINVAL;
3938
3939 tmp = READ_ONCE(sqe->fd);
3940 if (!tmp || tmp > USHRT_MAX)
3941 return -E2BIG;
3942 p->nbufs = tmp;
3943 p->addr = READ_ONCE(sqe->addr);
3944 p->len = READ_ONCE(sqe->len);
3945
Bijan Mottahedehefe68c12020-06-04 18:01:52 -07003946 if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003947 return -EFAULT;
3948
3949 p->bgid = READ_ONCE(sqe->buf_group);
3950 tmp = READ_ONCE(sqe->off);
3951 if (tmp > USHRT_MAX)
3952 return -E2BIG;
3953 p->bid = tmp;
3954 return 0;
3955}
3956
3957static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3958{
3959 struct io_buffer *buf;
3960 u64 addr = pbuf->addr;
3961 int i, bid = pbuf->bid;
3962
3963 for (i = 0; i < pbuf->nbufs; i++) {
3964 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3965 if (!buf)
3966 break;
3967
3968 buf->addr = addr;
3969 buf->len = pbuf->len;
3970 buf->bid = bid;
3971 addr += pbuf->len;
3972 bid++;
3973 if (!*head) {
3974 INIT_LIST_HEAD(&buf->list);
3975 *head = buf;
3976 } else {
3977 list_add_tail(&buf->list, &(*head)->list);
3978 }
3979 }
3980
3981 return i ? i : -ENOMEM;
3982}
3983
Pavel Begunkov889fca72021-02-10 00:03:09 +00003984static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07003985{
3986 struct io_provide_buf *p = &req->pbuf;
3987 struct io_ring_ctx *ctx = req->ctx;
3988 struct io_buffer *head, *list;
3989 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003990 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003991
3992 io_ring_submit_lock(ctx, !force_nonblock);
3993
3994 lockdep_assert_held(&ctx->uring_lock);
3995
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003996 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07003997
3998 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003999 if (ret >= 0 && !list) {
4000 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4001 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004002 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004003 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004004 if (ret < 0)
4005 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004006
4007 /* need to hold the lock to complete IOPOLL requests */
4008 if (ctx->flags & IORING_SETUP_IOPOLL) {
Pavel Begunkov889fca72021-02-10 00:03:09 +00004009 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004010 io_ring_submit_unlock(ctx, !force_nonblock);
4011 } else {
4012 io_ring_submit_unlock(ctx, !force_nonblock);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004013 __io_req_complete(req, issue_flags, ret, 0);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004014 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004015 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004016}
4017
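/*
 * IORING_OP_EPOLL_CTL mirrors epoll_ctl(2): sqe->fd is the epoll
 * instance, sqe->len the op (EPOLL_CTL_ADD etc.), sqe->off the target
 * fd and sqe->addr a user pointer to the struct epoll_event, copied in
 * at prep time when the op requires one.
 */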
Jens Axboe3e4827b2020-01-08 15:18:09 -07004018static int io_epoll_ctl_prep(struct io_kiocb *req,
4019 const struct io_uring_sqe *sqe)
4020{
4021#if defined(CONFIG_EPOLL)
4022 if (sqe->ioprio || sqe->buf_index)
4023 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004024 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004025 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004026
4027 req->epoll.epfd = READ_ONCE(sqe->fd);
4028 req->epoll.op = READ_ONCE(sqe->len);
4029 req->epoll.fd = READ_ONCE(sqe->off);
4030
4031 if (ep_op_has_event(req->epoll.op)) {
4032 struct epoll_event __user *ev;
4033
4034 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4035 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4036 return -EFAULT;
4037 }
4038
4039 return 0;
4040#else
4041 return -EOPNOTSUPP;
4042#endif
4043}
4044
Pavel Begunkov889fca72021-02-10 00:03:09 +00004045static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004046{
4047#if defined(CONFIG_EPOLL)
4048 struct io_epoll *ie = &req->epoll;
4049 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004050 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004051
4052 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4053 if (force_nonblock && ret == -EAGAIN)
4054 return -EAGAIN;
4055
4056 if (ret < 0)
4057 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004058 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004059 return 0;
4060#else
4061 return -EOPNOTSUPP;
4062#endif
4063}
4064
Jens Axboec1ca7572019-12-25 22:18:28 -07004065static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4066{
4067#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4068 if (sqe->ioprio || sqe->buf_index || sqe->off)
4069 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004070 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4071 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004072
4073 req->madvise.addr = READ_ONCE(sqe->addr);
4074 req->madvise.len = READ_ONCE(sqe->len);
4075 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4076 return 0;
4077#else
4078 return -EOPNOTSUPP;
4079#endif
4080}
4081
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004082static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004083{
4084#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4085 struct io_madvise *ma = &req->madvise;
4086 int ret;
4087
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004088 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004089 return -EAGAIN;
4090
Minchan Kim0726b012020-10-17 16:14:50 -07004091 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004092 if (ret < 0)
4093 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004094 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004095 return 0;
4096#else
4097 return -EOPNOTSUPP;
4098#endif
4099}
4100
Jens Axboe4840e412019-12-25 22:03:45 -07004101static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4102{
4103 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4104 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004105 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4106 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004107
4108 req->fadvise.offset = READ_ONCE(sqe->off);
4109 req->fadvise.len = READ_ONCE(sqe->len);
4110 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4111 return 0;
4112}
4113
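/*
 * Only the purely advisory hints (NORMAL, RANDOM, SEQUENTIAL) are
 * serviced inline in nonblocking context; any other advice is retried
 * from a blocking context.
 */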
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004114static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004115{
4116 struct io_fadvise *fa = &req->fadvise;
4117 int ret;
4118
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004119 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004120 switch (fa->advice) {
4121 case POSIX_FADV_NORMAL:
4122 case POSIX_FADV_RANDOM:
4123 case POSIX_FADV_SEQUENTIAL:
4124 break;
4125 default:
4126 return -EAGAIN;
4127 }
4128 }
Jens Axboe4840e412019-12-25 22:03:45 -07004129
4130 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4131 if (ret < 0)
4132 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004133 io_req_complete(req, ret);
Jens Axboe4840e412019-12-25 22:03:45 -07004134 return 0;
4135}
4136
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004137static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4138{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004139 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004140 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004141 if (sqe->ioprio || sqe->buf_index)
4142 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004143 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004144 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004145
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004146 req->statx.dfd = READ_ONCE(sqe->fd);
4147 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004148 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004149 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4150 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004151
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004152 return 0;
4153}
4154
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004155static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004156{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004157 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004158 int ret;
4159
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004160 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004161 /* only need file table for an actual valid fd */
4162 if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4163 req->flags |= REQ_F_NO_FILE_TABLE;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004164 return -EAGAIN;
Jens Axboe5b0bbee2020-04-27 10:41:22 -06004165 }
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004166
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004167 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4168 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004169
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004170 if (ret < 0)
4171 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004172 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004173 return 0;
4174}
4175
Jens Axboeb5dba592019-12-11 14:02:38 -07004176static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4177{
Jens Axboe14587a462020-09-05 11:36:08 -06004178 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004179 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004180 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4181 sqe->rw_flags || sqe->buf_index)
4182 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004183 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004184 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004185
4186 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004187 return 0;
4188}
4189
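/*
 * Close a file descriptor from the ring. The fd is looked up under
 * files->file_lock, and io_uring fds themselves are refused. If the
 * file has a ->flush() method the close may block, so it is punted to
 * a blocking context; otherwise the fd is detached with
 * __close_fd_get_file() and closed inline.
 */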
Pavel Begunkov889fca72021-02-10 00:03:09 +00004190static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004191{
Jens Axboe9eac1902021-01-19 15:50:37 -07004192 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004193 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004194 struct fdtable *fdt;
4195 struct file *file;
Jens Axboeb5dba592019-12-11 14:02:38 -07004196 int ret;
4197
Jens Axboe9eac1902021-01-19 15:50:37 -07004198 file = NULL;
4199 ret = -EBADF;
4200 spin_lock(&files->file_lock);
4201 fdt = files_fdtable(files);
4202 if (close->fd >= fdt->max_fds) {
4203 spin_unlock(&files->file_lock);
4204 goto err;
4205 }
4206 file = fdt->fd[close->fd];
4207 if (!file) {
4208 spin_unlock(&files->file_lock);
4209 goto err;
4210 }
4211
4212 if (file->f_op == &io_uring_fops) {
4213 spin_unlock(&files->file_lock);
4214 file = NULL;
4215 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004216 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004217
4218 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004219 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004220 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004221 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004222 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004223
Jens Axboe9eac1902021-01-19 15:50:37 -07004224 ret = __close_fd_get_file(close->fd, &file);
4225 spin_unlock(&files->file_lock);
4226 if (ret < 0) {
4227 if (ret == -ENOENT)
4228 ret = -EBADF;
4229 goto err;
4230 }
4231
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004232 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004233 ret = filp_close(file, current->files);
4234err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004235 if (ret < 0)
4236 req_set_fail_links(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004237 if (file)
4238 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004239 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004240 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004241}
4242
Pavel Begunkov1155c762021-02-18 18:29:38 +00004243static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004244{
4245 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004246
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004247 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4248 return -EINVAL;
4249 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4250 return -EINVAL;
4251
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004252 req->sync.off = READ_ONCE(sqe->off);
4253 req->sync.len = READ_ONCE(sqe->len);
4254 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004255 return 0;
4256}
4257
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004258static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004259{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004260 int ret;
4261
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004262 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004263 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004264 return -EAGAIN;
4265
Jens Axboe9adbd452019-12-20 08:45:55 -07004266 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004267 req->sync.flags);
4268 if (ret < 0)
4269 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004270 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004271 return 0;
4272}
4273
YueHaibing469956e2020-03-04 15:53:52 +08004274#if defined(CONFIG_NET)
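/*
 * Punt helper for sendmsg/recvmsg: copy the assembled msghdr state into
 * the request's async data so the operation can be replayed from a
 * blocking context, and return -EAGAIN to trigger the punt (or -ENOMEM
 * if the async data cannot be allocated).
 */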
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004275static int io_setup_async_msg(struct io_kiocb *req,
4276 struct io_async_msghdr *kmsg)
4277{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004278 struct io_async_msghdr *async_msg = req->async_data;
4279
4280 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004281 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004282 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004283 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004284 return -ENOMEM;
4285 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004286 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004287 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004288 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004289 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004290	/* if we're using fast_iov, set it to the new one */
4291 if (!async_msg->free_iov)
4292 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4293
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004294 return -EAGAIN;
4295}
4296
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004297static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4298 struct io_async_msghdr *iomsg)
4299{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004300 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004301 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004302 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004303 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004304}
4305
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004306static int io_sendmsg_prep_async(struct io_kiocb *req)
4307{
4308 int ret;
4309
4310 if (!io_op_defs[req->opcode].needs_async_data)
4311 return 0;
4312 ret = io_sendmsg_copy_hdr(req, req->async_data);
4313 if (!ret)
4314 req->flags |= REQ_F_NEED_CLEANUP;
4315 return ret;
4316}
4317
Jens Axboe3529d8c2019-12-19 18:24:38 -07004318static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004319{
Jens Axboee47293f2019-12-20 08:58:21 -07004320 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004321
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004322 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4323 return -EINVAL;
4324
Jens Axboee47293f2019-12-20 08:58:21 -07004325 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004326 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004327 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004328
Jens Axboed8768362020-02-27 14:17:49 -07004329#ifdef CONFIG_COMPAT
4330 if (req->ctx->compat)
4331 sr->msg_flags |= MSG_CMSG_COMPAT;
4332#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004333 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004334}
4335
Pavel Begunkov889fca72021-02-10 00:03:09 +00004336static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004337{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004338 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004339 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004340 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004341 int ret;
4342
Florent Revestdba4a922020-12-04 12:36:04 +01004343 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004344 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004345 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004346
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004347 kmsg = req->async_data;
4348 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004349 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004350 if (ret)
4351 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004352 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004353 }
4354
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004355 flags = req->sr_msg.msg_flags;
4356 if (flags & MSG_DONTWAIT)
4357 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004358 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004359 flags |= MSG_DONTWAIT;
4360
4361 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004362 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004363 return io_setup_async_msg(req, kmsg);
4364 if (ret == -ERESTARTSYS)
4365 ret = -EINTR;
4366
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004367 /* fast path, check for non-NULL to avoid function call */
4368 if (kmsg->free_iov)
4369 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004370 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboefddafac2020-01-04 20:19:44 -07004371 if (ret < 0)
4372 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004373 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004374 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004375}
4376
Pavel Begunkov889fca72021-02-10 00:03:09 +00004377static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004378{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004379 struct io_sr_msg *sr = &req->sr_msg;
4380 struct msghdr msg;
4381 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004382 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004383 unsigned flags;
Jens Axboe03b12302019-12-02 18:50:25 -07004384 int ret;
4385
Florent Revestdba4a922020-12-04 12:36:04 +01004386 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004387 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004388 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004389
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004390 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4391 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004392 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004393
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004394 msg.msg_name = NULL;
4395 msg.msg_control = NULL;
4396 msg.msg_controllen = 0;
4397 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004398
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004399 flags = req->sr_msg.msg_flags;
4400 if (flags & MSG_DONTWAIT)
4401 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004402 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004403 flags |= MSG_DONTWAIT;
Jens Axboe03b12302019-12-02 18:50:25 -07004404
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004405 msg.msg_flags = flags;
4406 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004407 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004408 return -EAGAIN;
4409 if (ret == -ERESTARTSYS)
4410 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004411
Jens Axboe03b12302019-12-02 18:50:25 -07004412 if (ret < 0)
4413 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004414 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004415 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004416}
4417
Pavel Begunkov1400e692020-07-12 20:41:05 +03004418static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4419 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004420{
4421 struct io_sr_msg *sr = &req->sr_msg;
4422 struct iovec __user *uiov;
4423 size_t iov_len;
4424 int ret;
4425
Pavel Begunkov1400e692020-07-12 20:41:05 +03004426 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4427 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004428 if (ret)
4429 return ret;
4430
4431 if (req->flags & REQ_F_BUFFER_SELECT) {
4432 if (iov_len > 1)
4433 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004434 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004435 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004436 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004437 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004438 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004439 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004440 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004441 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004442 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004443 if (ret > 0)
4444 ret = 0;
4445 }
4446
4447 return ret;
4448}
4449
4450#ifdef CONFIG_COMPAT
4451static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004452 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004453{
4454 struct compat_msghdr __user *msg_compat;
4455 struct io_sr_msg *sr = &req->sr_msg;
4456 struct compat_iovec __user *uiov;
4457 compat_uptr_t ptr;
4458 compat_size_t len;
4459 int ret;
4460
Pavel Begunkov270a5942020-07-12 20:41:04 +03004461 msg_compat = (struct compat_msghdr __user *) sr->umsg;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004462 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004463 &ptr, &len);
4464 if (ret)
4465 return ret;
4466
4467 uiov = compat_ptr(ptr);
4468 if (req->flags & REQ_F_BUFFER_SELECT) {
4469 compat_ssize_t clen;
4470
4471 if (len > 1)
4472 return -EINVAL;
4473 if (!access_ok(uiov, sizeof(*uiov)))
4474 return -EFAULT;
4475 if (__get_user(clen, &uiov->iov_len))
4476 return -EFAULT;
4477 if (clen < 0)
4478 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004479 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004480 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004481 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004482 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004483 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004484 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004485 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004486 if (ret < 0)
4487 return ret;
4488 }
4489
4490 return 0;
4491}
Jens Axboe03b12302019-12-02 18:50:25 -07004492#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004493
Pavel Begunkov1400e692020-07-12 20:41:05 +03004494static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4495 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004496{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004497 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004498
4499#ifdef CONFIG_COMPAT
4500 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004501 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004502#endif
4503
Pavel Begunkov1400e692020-07-12 20:41:05 +03004504 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004505}
4506
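/*
 * For REQ_F_BUFFER_SELECT receives, pick a buffer from the request's
 * buffer group. The chosen kbuf is remembered on the request and
 * REQ_F_BUFFER_SELECTED is set so the buffer id can be handed back via
 * the CQE flags on completion.
 */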
Jens Axboebcda7ba2020-02-23 16:42:51 -07004507static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004508 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004509{
4510 struct io_sr_msg *sr = &req->sr_msg;
4511 struct io_buffer *kbuf;
4512
Jens Axboebcda7ba2020-02-23 16:42:51 -07004513 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4514 if (IS_ERR(kbuf))
4515 return kbuf;
4516
4517 sr->kbuf = kbuf;
4518 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004519 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004520}
4521
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004522static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4523{
4524 return io_put_kbuf(req, req->sr_msg.kbuf);
4525}
4526
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004527static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004528{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004529 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004530
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004531 if (!io_op_defs[req->opcode].needs_async_data)
4532 return 0;
4533 ret = io_recvmsg_copy_hdr(req, req->async_data);
4534 if (!ret)
4535 req->flags |= REQ_F_NEED_CLEANUP;
4536 return ret;
4537}
4538
4539static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4540{
4541 struct io_sr_msg *sr = &req->sr_msg;
4542
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004543 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4544 return -EINVAL;
4545
Jens Axboe3529d8c2019-12-19 18:24:38 -07004546 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004547 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004548 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004549 sr->bgid = READ_ONCE(sqe->buf_group);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004550
Jens Axboed8768362020-02-27 14:17:49 -07004551#ifdef CONFIG_COMPAT
4552 if (req->ctx->compat)
4553 sr->msg_flags |= MSG_CMSG_COMPAT;
4554#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004555 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004556}
4557
Pavel Begunkov889fca72021-02-10 00:03:09 +00004558static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004559{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004560 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004561 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004562 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004563 unsigned flags;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004564 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004565 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004566
Florent Revestdba4a922020-12-04 12:36:04 +01004567 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004568 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004569 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004570
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004571 kmsg = req->async_data;
4572 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004573 ret = io_recvmsg_copy_hdr(req, &iomsg);
4574 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004575 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004576 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004577 }
4578
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004579 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004580 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004581 if (IS_ERR(kbuf))
4582 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004583 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004584 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4585 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004586 1, req->sr_msg.len);
4587 }
4588
4589 flags = req->sr_msg.msg_flags;
4590 if (flags & MSG_DONTWAIT)
4591 req->flags |= REQ_F_NOWAIT;
4592 else if (force_nonblock)
4593 flags |= MSG_DONTWAIT;
4594
4595 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4596 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004597 if (force_nonblock && ret == -EAGAIN)
4598 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004599 if (ret == -ERESTARTSYS)
4600 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004601
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004602 if (req->flags & REQ_F_BUFFER_SELECTED)
4603 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004604 /* fast path, check for non-NULL to avoid function call */
4605 if (kmsg->free_iov)
4606 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004607 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004608 if (ret < 0)
4609 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004610 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004611 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004612}
4613
Pavel Begunkov889fca72021-02-10 00:03:09 +00004614static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004615{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004616 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004617 struct io_sr_msg *sr = &req->sr_msg;
4618 struct msghdr msg;
4619 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004620 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004621 struct iovec iov;
4622 unsigned flags;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004623 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004624 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004625
Florent Revestdba4a922020-12-04 12:36:04 +01004626 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004627 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004628 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004629
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004630 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004631 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004632 if (IS_ERR(kbuf))
4633 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004634 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004635 }
4636
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004637 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004638 if (unlikely(ret))
4639 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004640
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004641 msg.msg_name = NULL;
4642 msg.msg_control = NULL;
4643 msg.msg_controllen = 0;
4644 msg.msg_namelen = 0;
4645 msg.msg_iocb = NULL;
4646 msg.msg_flags = 0;
4647
4648 flags = req->sr_msg.msg_flags;
4649 if (flags & MSG_DONTWAIT)
4650 req->flags |= REQ_F_NOWAIT;
4651 else if (force_nonblock)
4652 flags |= MSG_DONTWAIT;
4653
4654 ret = sock_recvmsg(sock, &msg, flags);
4655 if (force_nonblock && ret == -EAGAIN)
4656 return -EAGAIN;
4657 if (ret == -ERESTARTSYS)
4658 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004659out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004660 if (req->flags & REQ_F_BUFFER_SELECTED)
4661 cflags = io_put_recv_kbuf(req);
Jens Axboefddafac2020-01-04 20:19:44 -07004662 if (ret < 0)
4663 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004664 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004665 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004666}
4667
Jens Axboe3529d8c2019-12-19 18:24:38 -07004668static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004669{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004670 struct io_accept *accept = &req->accept;
4671
Jens Axboe14587a462020-09-05 11:36:08 -06004672 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004673 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004674 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004675 return -EINVAL;
4676
Jens Axboed55e5f52019-12-11 16:12:15 -07004677 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4678 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004679 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004680 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004681 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004682}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004683
Pavel Begunkov889fca72021-02-10 00:03:09 +00004684static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004685{
4686 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004687 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004688 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004689 int ret;
4690
Jiufei Xuee697dee2020-06-10 13:41:59 +08004691 if (req->file->f_flags & O_NONBLOCK)
4692 req->flags |= REQ_F_NOWAIT;
4693
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004694 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004695 accept->addr_len, accept->flags,
4696 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004697 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004698 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004699 if (ret < 0) {
4700 if (ret == -ERESTARTSYS)
4701 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004702 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004703 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004704 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004705 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004706}
4707
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004708static int io_connect_prep_async(struct io_kiocb *req)
4709{
4710 struct io_async_connect *io = req->async_data;
4711 struct io_connect *conn = &req->connect;
4712
4713 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4714}
4715
Jens Axboe3529d8c2019-12-19 18:24:38 -07004716static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004717{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004718 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004719
Jens Axboe14587a462020-09-05 11:36:08 -06004720 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004721 return -EINVAL;
4722 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4723 return -EINVAL;
4724
Jens Axboe3529d8c2019-12-19 18:24:38 -07004725 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4726 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004727 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004728}
4729
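/*
 * Issue the connect. In nonblocking context, -EAGAIN or -EINPROGRESS
 * means the socket address is preserved in the request's async data
 * and the whole operation is retried from a blocking context.
 */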
Pavel Begunkov889fca72021-02-10 00:03:09 +00004730static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004731{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004732 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004733 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004734 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004735 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004736
Jens Axboee8c2bc12020-08-15 18:44:09 -07004737 if (req->async_data) {
4738 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004739 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004740 ret = move_addr_to_kernel(req->connect.addr,
4741 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004742 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004743 if (ret)
4744 goto out;
4745 io = &__io;
4746 }
4747
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004748 file_flags = force_nonblock ? O_NONBLOCK : 0;
4749
Jens Axboee8c2bc12020-08-15 18:44:09 -07004750 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004751 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004752 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004753 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004754 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004755 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004756 ret = -ENOMEM;
4757 goto out;
4758 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004759 io = req->async_data;
4760 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004761 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004762 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004763 if (ret == -ERESTARTSYS)
4764 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004765out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004766 if (ret < 0)
4767 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004768 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004769 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004770}
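
/*
 * Illustrative userspace sketch of the connect path above via liburing;
 * io_uring_prep_connect() and the 'ring'/'sockfd' names are assumed from
 * liburing's public API, not part of this file. The kernel side attempts a
 * nonblocking connect first and falls back to poll-driven retry on
 * -EAGAIN/-EINPROGRESS:
 *
 *	struct sockaddr_in dst = { .sin_family = AF_INET };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst,
 *			      sizeof(dst));
 *	io_uring_submit(&ring);
 *
 *	(the CQE's res field is 0 on success or a negative errno)
 */
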
YueHaibing469956e2020-03-04 15:53:52 +08004771#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004772#define IO_NETOP_FN(op) \
4773static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4774{ \
4775 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004776}
4777
Jens Axboe99a10082021-02-19 09:35:19 -07004778#define IO_NETOP_PREP(op) \
4779IO_NETOP_FN(op) \
4780static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4781{ \
4782 return -EOPNOTSUPP; \
4783} \
4784
4785#define IO_NETOP_PREP_ASYNC(op) \
4786IO_NETOP_PREP(op) \
4787static int io_##op##_prep_async(struct io_kiocb *req) \
4788{ \
4789 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004790}
4791
Jens Axboe99a10082021-02-19 09:35:19 -07004792IO_NETOP_PREP_ASYNC(sendmsg);
4793IO_NETOP_PREP_ASYNC(recvmsg);
4794IO_NETOP_PREP_ASYNC(connect);
4795IO_NETOP_PREP(accept);
4796IO_NETOP_FN(send);
4797IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004798#endif /* CONFIG_NET */
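
/*
 * For reference, when CONFIG_NET is not set the stubs above expand so that,
 * e.g., IO_NETOP_PREP_ASYNC(connect) defines three functions that simply
 * return -EOPNOTSUPP (rough expansion):
 *
 *	static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 *	{ return -EOPNOTSUPP; }
 *	static int io_connect_prep(struct io_kiocb *req,
 *				   const struct io_uring_sqe *sqe)
 *	{ return -EOPNOTSUPP; }
 *	static int io_connect_prep_async(struct io_kiocb *req)
 *	{ return -EOPNOTSUPP; }
 */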
Jens Axboe17f2fe32019-10-17 14:42:58 -06004799
Jens Axboed7718a92020-02-14 22:23:12 -07004800struct io_poll_table {
4801 struct poll_table_struct pt;
4802 struct io_kiocb *req;
4803 int error;
4804};
4805
Jens Axboed7718a92020-02-14 22:23:12 -07004806static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4807 __poll_t mask, task_work_func_t func)
4808{
Jens Axboeaa96bf82020-04-03 11:26:26 -06004809 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004810
4811 /* for instances that support it check for an event match first: */
4812 if (mask && !(mask & poll->events))
4813 return 0;
4814
4815 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4816
4817 list_del_init(&poll->wait.entry);
4818
Jens Axboed7718a92020-02-14 22:23:12 -07004819 req->result = mask;
Jens Axboe7cbf1722021-02-10 00:03:20 +00004820 req->task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004821 percpu_ref_get(&req->ctx->refs);
4822
Jens Axboed7718a92020-02-14 22:23:12 -07004823 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004824 * If this fails, then the task is exiting. When a task exits, the
4825 * work gets canceled, so just cancel this request as well instead
4826 * of executing it. We can't safely execute it anyway, as we may not
	4827	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07004828 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06004829 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004830 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004831 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00004832 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004833 }
Jens Axboed7718a92020-02-14 22:23:12 -07004834 return 1;
4835}
4836
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004837static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4838 __acquires(&req->ctx->completion_lock)
4839{
4840 struct io_ring_ctx *ctx = req->ctx;
4841
4842 if (!req->result && !READ_ONCE(poll->canceled)) {
4843 struct poll_table_struct pt = { ._key = poll->events };
4844
4845 req->result = vfs_poll(req->file, &pt) & poll->events;
4846 }
4847
4848 spin_lock_irq(&ctx->completion_lock);
4849 if (!req->result && !READ_ONCE(poll->canceled)) {
4850 add_wait_queue(poll->head, &poll->wait);
4851 return true;
4852 }
4853
4854 return false;
4855}
4856
Jens Axboed4e7cd32020-08-15 11:44:50 -07004857static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004858{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004859 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004860 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004861 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004862 return req->apoll->double_poll;
4863}
4864
4865static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4866{
4867 if (req->opcode == IORING_OP_POLL_ADD)
4868 return &req->poll;
4869 return &req->apoll->poll;
4870}
4871
4872static void io_poll_remove_double(struct io_kiocb *req)
4873{
4874 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004875
4876 lockdep_assert_held(&req->ctx->completion_lock);
4877
4878 if (poll && poll->head) {
4879 struct wait_queue_head *head = poll->head;
4880
4881 spin_lock(&head->lock);
4882 list_del_init(&poll->wait.entry);
4883 if (poll->wait.private)
4884 refcount_dec(&req->refs);
4885 poll->head = NULL;
4886 spin_unlock(&head->lock);
4887 }
4888}
4889
4890static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4891{
4892 struct io_ring_ctx *ctx = req->ctx;
4893
Jens Axboed4e7cd32020-08-15 11:44:50 -07004894 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004895 req->poll.done = true;
4896 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4897 io_commit_cqring(ctx);
4898}
4899
Jens Axboe18bceab2020-05-15 11:56:54 -06004900static void io_poll_task_func(struct callback_head *cb)
4901{
4902 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004903 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004904 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004905
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004906 if (io_poll_rewait(req, &req->poll)) {
4907 spin_unlock_irq(&ctx->completion_lock);
4908 } else {
4909 hash_del(&req->hash_node);
4910 io_poll_complete(req, req->result, 0);
4911 spin_unlock_irq(&ctx->completion_lock);
4912
4913 nxt = io_put_req_find_next(req);
4914 io_cqring_ev_posted(ctx);
4915 if (nxt)
4916 __io_req_task_submit(nxt);
4917 }
4918
Jens Axboe6d816e02020-08-11 08:04:14 -06004919 percpu_ref_put(&ctx->refs);
Jens Axboe18bceab2020-05-15 11:56:54 -06004920}
4921
4922static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4923 int sync, void *key)
4924{
4925 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004926 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004927 __poll_t mask = key_to_poll(key);
4928
4929 /* for instances that support it check for an event match first: */
4930 if (mask && !(mask & poll->events))
4931 return 0;
4932
Jens Axboe8706e042020-09-28 08:38:54 -06004933 list_del_init(&wait->entry);
4934
Jens Axboe807abcb2020-07-17 17:09:27 -06004935 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004936 bool done;
4937
Jens Axboe807abcb2020-07-17 17:09:27 -06004938 spin_lock(&poll->head->lock);
4939 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004940 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004941 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004942 /* make sure double remove sees this as being gone */
4943 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06004944 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06004945 if (!done) {
4946 /* use wait func handler, so it matches the rq type */
4947 poll->wait.func(&poll->wait, mode, sync, key);
4948 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004949 }
4950 refcount_dec(&req->refs);
4951 return 1;
4952}
4953
4954static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4955 wait_queue_func_t wake_func)
4956{
4957 poll->head = NULL;
4958 poll->done = false;
4959 poll->canceled = false;
4960 poll->events = events;
4961 INIT_LIST_HEAD(&poll->wait.entry);
4962 init_waitqueue_func_entry(&poll->wait, wake_func);
4963}
4964
4965static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06004966 struct wait_queue_head *head,
4967 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06004968{
4969 struct io_kiocb *req = pt->req;
4970
4971 /*
4972 * If poll->head is already set, it's because the file being polled
4973 * uses multiple waitqueues for poll handling (eg one for read, one
	4974	 * for write). Set up a separate io_poll_iocb if this happens.
4975 */
4976 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01004977 struct io_poll_iocb *poll_one = poll;
4978
Jens Axboe18bceab2020-05-15 11:56:54 -06004979 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06004980 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004981 pt->error = -EINVAL;
4982 return;
4983 }
Jens Axboe1c3b3e62021-02-28 16:07:30 -07004984 /* double add on the same waitqueue head, ignore */
4985 if (poll->head == head)
4986 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06004987 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4988 if (!poll) {
4989 pt->error = -ENOMEM;
4990 return;
4991 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01004992 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboe18bceab2020-05-15 11:56:54 -06004993 refcount_inc(&req->refs);
4994 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06004995 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06004996 }
4997
4998 pt->error = 0;
4999 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005000
5001 if (poll->events & EPOLLEXCLUSIVE)
5002 add_wait_queue_exclusive(head, &poll->wait);
5003 else
5004 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005005}
5006
5007static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5008 struct poll_table_struct *p)
5009{
5010 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005011 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005012
Jens Axboe807abcb2020-07-17 17:09:27 -06005013 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005014}
5015
Jens Axboed7718a92020-02-14 22:23:12 -07005016static void io_async_task_func(struct callback_head *cb)
5017{
5018 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5019 struct async_poll *apoll = req->apoll;
5020 struct io_ring_ctx *ctx = req->ctx;
5021
5022 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5023
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005024 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005025 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe6d816e02020-08-11 08:04:14 -06005026 percpu_ref_put(&ctx->refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005027 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005028 }
5029
Jens Axboe31067252020-05-17 17:43:31 -06005030 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005031 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005032 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005033
Jens Axboed4e7cd32020-08-15 11:44:50 -07005034 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005035 spin_unlock_irq(&ctx->completion_lock);
5036
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005037 if (!READ_ONCE(apoll->poll.canceled))
5038 __io_req_task_submit(req);
5039 else
5040 __io_req_task_cancel(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005041
Jens Axboe6d816e02020-08-11 08:04:14 -06005042 percpu_ref_put(&ctx->refs);
Jens Axboe807abcb2020-07-17 17:09:27 -06005043 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005044 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005045}
5046
5047static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5048 void *key)
5049{
5050 struct io_kiocb *req = wait->private;
5051 struct io_poll_iocb *poll = &req->apoll->poll;
5052
5053 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5054 key_to_poll(key));
5055
5056 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5057}
5058
5059static void io_poll_req_insert(struct io_kiocb *req)
5060{
5061 struct io_ring_ctx *ctx = req->ctx;
5062 struct hlist_head *list;
5063
5064 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5065 hlist_add_head(&req->hash_node, list);
5066}
5067
5068static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5069 struct io_poll_iocb *poll,
5070 struct io_poll_table *ipt, __poll_t mask,
5071 wait_queue_func_t wake_func)
5072 __acquires(&ctx->completion_lock)
5073{
5074 struct io_ring_ctx *ctx = req->ctx;
5075 bool cancel = false;
5076
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005077 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005078 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005079 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005080 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005081
5082 ipt->pt._key = mask;
5083 ipt->req = req;
5084 ipt->error = -EINVAL;
5085
Jens Axboed7718a92020-02-14 22:23:12 -07005086 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5087
5088 spin_lock_irq(&ctx->completion_lock);
5089 if (likely(poll->head)) {
5090 spin_lock(&poll->head->lock);
5091 if (unlikely(list_empty(&poll->wait.entry))) {
5092 if (ipt->error)
5093 cancel = true;
5094 ipt->error = 0;
5095 mask = 0;
5096 }
5097 if (mask || ipt->error)
5098 list_del_init(&poll->wait.entry);
5099 else if (cancel)
5100 WRITE_ONCE(poll->canceled, true);
5101 else if (!poll->done) /* actually waiting for an event */
5102 io_poll_req_insert(req);
5103 spin_unlock(&poll->head->lock);
5104 }
5105
5106 return mask;
5107}
5108
5109static bool io_arm_poll_handler(struct io_kiocb *req)
5110{
5111 const struct io_op_def *def = &io_op_defs[req->opcode];
5112 struct io_ring_ctx *ctx = req->ctx;
5113 struct async_poll *apoll;
5114 struct io_poll_table ipt;
5115 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005116 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005117
5118 if (!req->file || !file_can_poll(req->file))
5119 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005120 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005121 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005122 if (def->pollin)
5123 rw = READ;
5124 else if (def->pollout)
5125 rw = WRITE;
5126 else
5127 return false;
	5128	/* if we can't do a nonblocking try, no point in arming a poll handler */
5129 if (!io_file_supports_async(req->file, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005130 return false;
5131
5132 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5133 if (unlikely(!apoll))
5134 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005135 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005136
5137 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005138 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005139
Nathan Chancellor8755d972020-03-02 16:01:19 -07005140 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005141 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005142 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005143 if (def->pollout)
5144 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005145
5146 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5147 if ((req->opcode == IORING_OP_RECVMSG) &&
5148 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5149 mask &= ~POLLIN;
5150
Jens Axboed7718a92020-02-14 22:23:12 -07005151 mask |= POLLERR | POLLPRI;
5152
5153 ipt.pt._qproc = io_async_queue_proc;
5154
5155 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5156 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005157 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005158 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005159 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005160 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005161 kfree(apoll);
5162 return false;
5163 }
5164 spin_unlock_irq(&ctx->completion_lock);
5165 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5166 apoll->poll.events);
5167 return true;
5168}
5169
5170static bool __io_poll_remove_one(struct io_kiocb *req,
5171 struct io_poll_iocb *poll)
5172{
Jens Axboeb41e9852020-02-17 09:52:41 -07005173 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005174
5175 spin_lock(&poll->head->lock);
5176 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005177 if (!list_empty(&poll->wait.entry)) {
5178 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005179 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005180 }
5181 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005182 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005183 return do_complete;
5184}
5185
5186static bool io_poll_remove_one(struct io_kiocb *req)
5187{
5188 bool do_complete;
5189
Jens Axboed4e7cd32020-08-15 11:44:50 -07005190 io_poll_remove_double(req);
5191
Jens Axboed7718a92020-02-14 22:23:12 -07005192 if (req->opcode == IORING_OP_POLL_ADD) {
5193 do_complete = __io_poll_remove_one(req, &req->poll);
5194 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005195 struct async_poll *apoll = req->apoll;
5196
Jens Axboed7718a92020-02-14 22:23:12 -07005197 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005198 do_complete = __io_poll_remove_one(req, &apoll->poll);
5199 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005200 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005201 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005202 kfree(apoll);
5203 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005204 }
5205
Jens Axboeb41e9852020-02-17 09:52:41 -07005206 if (do_complete) {
5207 io_cqring_fill_event(req, -ECANCELED);
5208 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005209 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005210 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005211 }
5212
5213 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005214}
5215
Jens Axboe76e1b642020-09-26 15:05:03 -06005216/*
5217 * Returns true if we found and killed one or more poll requests
5218 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005219static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5220 struct files_struct *files)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005221{
Jens Axboe78076bb2019-12-04 19:56:40 -07005222 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005223 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005224 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005225
5226 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005227 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5228 struct hlist_head *list;
5229
5230 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005231 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00005232 if (io_match_task(req, tsk, files))
Jens Axboef3606e32020-09-22 08:18:24 -06005233 posted += io_poll_remove_one(req);
5234 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005235 }
5236 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005237
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005238 if (posted)
5239 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005240
5241 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005242}
5243
Jens Axboe47f46762019-11-09 17:43:02 -07005244static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5245{
Jens Axboe78076bb2019-12-04 19:56:40 -07005246 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005247 struct io_kiocb *req;
5248
Jens Axboe78076bb2019-12-04 19:56:40 -07005249 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5250 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005251 if (sqe_addr != req->user_data)
5252 continue;
5253 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005254 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005255 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005256 }
5257
5258 return -ENOENT;
5259}
5260
Jens Axboe3529d8c2019-12-19 18:24:38 -07005261static int io_poll_remove_prep(struct io_kiocb *req,
5262 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005263{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005264 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5265 return -EINVAL;
5266 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5267 sqe->poll_events)
5268 return -EINVAL;
5269
Pavel Begunkov018043b2020-10-27 23:17:18 +00005270 req->poll_remove.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005271 return 0;
5272}
5273
5274/*
5275 * Find a running poll command that matches one specified in sqe->addr,
5276 * and remove it if found.
5277 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005278static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005279{
5280 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0969e782019-12-17 18:40:57 -07005281 int ret;
5282
Jens Axboe221c5eb2019-01-17 09:41:58 -07005283 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov018043b2020-10-27 23:17:18 +00005284 ret = io_poll_cancel(ctx, req->poll_remove.addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005285 spin_unlock_irq(&ctx->completion_lock);
5286
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005287 if (ret < 0)
5288 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005289 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005290 return 0;
5291}
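
/*
 * Illustrative userspace sketch of cancelling a pending poll request by its
 * user_data, handled by io_poll_remove() above. liburing's
 * io_uring_prep_poll_remove() helper is assumed here; older releases take a
 * void * cookie while newer ones take a __u64, so the cast is an assumption:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_remove(sqe, (void *)0x1234);
 *	io_uring_submit(&ring);
 *
 *	(the CQE's res field is 0 if removed, else -ENOENT or -EALREADY,
 *	 matching io_poll_cancel() above)
 */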
5292
Jens Axboe221c5eb2019-01-17 09:41:58 -07005293static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5294 void *key)
5295{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005296 struct io_kiocb *req = wait->private;
5297 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005298
Jens Axboed7718a92020-02-14 22:23:12 -07005299 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005300}
5301
Jens Axboe221c5eb2019-01-17 09:41:58 -07005302static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5303 struct poll_table_struct *p)
5304{
5305 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5306
Jens Axboee8c2bc12020-08-15 18:44:09 -07005307 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005308}
5309
Jens Axboe3529d8c2019-12-19 18:24:38 -07005310static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005311{
5312 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005313 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005314
5315 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5316 return -EINVAL;
5317 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5318 return -EINVAL;
5319
Jiufei Xue5769a352020-06-17 17:53:55 +08005320 events = READ_ONCE(sqe->poll32_events);
5321#ifdef __BIG_ENDIAN
5322 events = swahw32(events);
5323#endif
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005324 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5325 (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005326 return 0;
5327}
5328
Pavel Begunkov61e98202021-02-10 00:03:08 +00005329static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005330{
5331 struct io_poll_iocb *poll = &req->poll;
5332 struct io_ring_ctx *ctx = req->ctx;
5333 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005334 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005335
Jens Axboed7718a92020-02-14 22:23:12 -07005336 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005337
Jens Axboed7718a92020-02-14 22:23:12 -07005338 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5339 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005340
Jens Axboe8c838782019-03-12 15:48:16 -06005341 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005342 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005343 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005344 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005345 spin_unlock_irq(&ctx->completion_lock);
5346
Jens Axboe8c838782019-03-12 15:48:16 -06005347 if (mask) {
5348 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005349 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005350 }
Jens Axboe8c838782019-03-12 15:48:16 -06005351 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005352}
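
/*
 * Illustrative userspace sketch of a one-shot poll request, served by
 * io_poll_add() above; io_uring_prep_poll_add() and the 'ring'/'fd' names
 * are assumed from liburing's public API, not part of this file:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = 0x1234;
 *	io_uring_submit(&ring);
 *
 *	(0x1234 can later be passed to poll remove/cancel; the CQE's res field
 *	 carries the signalled mask, mangled to the userspace poll(2) format
 *	 as in io_poll_complete() above)
 */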
5353
Jens Axboe5262f562019-09-17 12:26:57 -06005354static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5355{
Jens Axboead8a48a2019-11-15 08:49:11 -07005356 struct io_timeout_data *data = container_of(timer,
5357 struct io_timeout_data, timer);
5358 struct io_kiocb *req = data->req;
5359 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005360 unsigned long flags;
5361
Jens Axboe5262f562019-09-17 12:26:57 -06005362 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005363 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005364 atomic_set(&req->ctx->cq_timeouts,
5365 atomic_read(&req->ctx->cq_timeouts) + 1);
5366
Jens Axboe78e19bb2019-11-06 15:21:34 -07005367 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005368 io_commit_cqring(ctx);
5369 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5370
5371 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005372 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005373 io_put_req(req);
5374 return HRTIMER_NORESTART;
5375}
5376
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005377static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5378 __u64 user_data)
Jens Axboe47f46762019-11-09 17:43:02 -07005379{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005380 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005381 struct io_kiocb *req;
5382 int ret = -ENOENT;
5383
5384 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5385 if (user_data == req->user_data) {
5386 ret = 0;
5387 break;
5388 }
5389 }
5390
5391 if (ret == -ENOENT)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005392 return ERR_PTR(ret);
Jens Axboef254ac02020-08-12 17:33:30 -06005393
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005394 io = req->async_data;
5395 ret = hrtimer_try_to_cancel(&io->timer);
5396 if (ret == -1)
5397 return ERR_PTR(-EALREADY);
5398 list_del_init(&req->timeout.list);
5399 return req;
5400}
5401
5402static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5403{
5404 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5405
5406 if (IS_ERR(req))
5407 return PTR_ERR(req);
5408
5409 req_set_fail_links(req);
5410 io_cqring_fill_event(req, -ECANCELED);
5411 io_put_req_deferred(req, 1);
5412 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005413}
5414
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005415static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5416 struct timespec64 *ts, enum hrtimer_mode mode)
5417{
5418 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5419 struct io_timeout_data *data;
5420
5421 if (IS_ERR(req))
5422 return PTR_ERR(req);
5423
5424 req->timeout.off = 0; /* noseq */
5425 data = req->async_data;
5426 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5427 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5428 data->timer.function = io_timeout_fn;
5429 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5430 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005431}
5432
Jens Axboe3529d8c2019-12-19 18:24:38 -07005433static int io_timeout_remove_prep(struct io_kiocb *req,
5434 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005435{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005436 struct io_timeout_rem *tr = &req->timeout_rem;
5437
Jens Axboeb29472e2019-12-17 18:50:29 -07005438 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5439 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005440 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5441 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005442 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005443 return -EINVAL;
5444
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005445 tr->addr = READ_ONCE(sqe->addr);
5446 tr->flags = READ_ONCE(sqe->timeout_flags);
5447 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5448 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5449 return -EINVAL;
5450 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5451 return -EFAULT;
5452 } else if (tr->flags) {
5453 /* timeout removal doesn't support flags */
5454 return -EINVAL;
5455 }
5456
Jens Axboeb29472e2019-12-17 18:50:29 -07005457 return 0;
5458}
5459
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005460static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5461{
5462 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5463 : HRTIMER_MODE_REL;
5464}
5465
Jens Axboe11365042019-10-16 09:08:32 -06005466/*
5467 * Remove or update an existing timeout command
5468 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005469static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005470{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005471 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005472 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005473 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005474
Jens Axboe11365042019-10-16 09:08:32 -06005475 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005476 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005477 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005478 else
5479 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5480 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005481
Jens Axboe47f46762019-11-09 17:43:02 -07005482 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005483 io_commit_cqring(ctx);
5484 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005485 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005486 if (ret < 0)
5487 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005488 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005489 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005490}
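
/*
 * Illustrative userspace sketch of removing a pending timeout, handled by
 * io_timeout_remove() above. io_uring_prep_timeout_remove() is assumed from
 * liburing's public API; newer liburing also exposes an update variant for
 * IORING_TIMEOUT_UPDATE, whose exact name and signature vary by release:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout_remove(sqe, 0xdead, 0);
 *	io_uring_submit(&ring);
 *
 *	(0xdead is the user_data of the timeout to cancel; the CQE's res field
 *	 is 0 on removal, else -ENOENT or -EALREADY)
 */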
5491
Jens Axboe3529d8c2019-12-19 18:24:38 -07005492static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005493 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005494{
Jens Axboead8a48a2019-11-15 08:49:11 -07005495 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005496 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005497 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005498
Jens Axboead8a48a2019-11-15 08:49:11 -07005499 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005500 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005501 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005502 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005503 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005504 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005505 flags = READ_ONCE(sqe->timeout_flags);
5506 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005507 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005508
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005509 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005510
Jens Axboee8c2bc12020-08-15 18:44:09 -07005511 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005512 return -ENOMEM;
5513
Jens Axboee8c2bc12020-08-15 18:44:09 -07005514 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005515 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005516
5517 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005518 return -EFAULT;
5519
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005520 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005521 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkovdd59a3d2021-03-04 13:59:25 +00005522 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005523 return 0;
5524}
5525
Pavel Begunkov61e98202021-02-10 00:03:08 +00005526static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005527{
Jens Axboead8a48a2019-11-15 08:49:11 -07005528 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005529 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005530 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005531 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005532
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005533 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005534
Jens Axboe5262f562019-09-17 12:26:57 -06005535 /*
	5536	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005537 * timeout event to be satisfied. If it isn't set, then this is
5538 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005539 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005540 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005541 entry = ctx->timeout_list.prev;
5542 goto add;
5543 }
Jens Axboe5262f562019-09-17 12:26:57 -06005544
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005545 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5546 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005547
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005548 /* Update the last seq here in case io_flush_timeouts() hasn't.
5549 * This is safe because ->completion_lock is held, and submissions
5550 * and completions are never mixed in the same ->completion_lock section.
5551 */
5552 ctx->cq_last_tm_flush = tail;
5553
Jens Axboe5262f562019-09-17 12:26:57 -06005554 /*
5555 * Insertion sort, ensuring the first entry in the list is always
5556 * the one we need first.
5557 */
Jens Axboe5262f562019-09-17 12:26:57 -06005558 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005559 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5560 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005561
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005562 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005563 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005564 /* nxt.seq is behind @tail, otherwise would've been completed */
5565 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005566 break;
5567 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005568add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005569 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005570 data->timer.function = io_timeout_fn;
5571 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005572 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005573 return 0;
5574}
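
/*
 * Illustrative userspace sketch of arming a relative one-second timeout,
 * queued by io_timeout() above into the sorted ->timeout_list.
 * io_uring_prep_timeout() and the 'ring' name are assumed from liburing's
 * public API, not part of this file:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 *	io_uring_submit(&ring);
 *
 *	(a count of 0 and no flags gives a pure relative timeout; the CQE's
 *	 res field is -ETIME when it fires, -ECANCELED if removed first)
 */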
5575
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005576struct io_cancel_data {
5577 struct io_ring_ctx *ctx;
5578 u64 user_data;
5579};
5580
Jens Axboe62755e32019-10-28 21:49:21 -06005581static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005582{
Jens Axboe62755e32019-10-28 21:49:21 -06005583 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005584 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005585
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005586 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005587}
5588
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005589static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5590 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005591{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005592 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005593 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005594 int ret = 0;
5595
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005596 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005597 return -ENOENT;
5598
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005599 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005600 switch (cancel_ret) {
5601 case IO_WQ_CANCEL_OK:
5602 ret = 0;
5603 break;
5604 case IO_WQ_CANCEL_RUNNING:
5605 ret = -EALREADY;
5606 break;
5607 case IO_WQ_CANCEL_NOTFOUND:
5608 ret = -ENOENT;
5609 break;
5610 }
5611
Jens Axboee977d6d2019-11-05 12:39:45 -07005612 return ret;
5613}
5614
Jens Axboe47f46762019-11-09 17:43:02 -07005615static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5616 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005617 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005618{
5619 unsigned long flags;
5620 int ret;
5621
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005622 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005623 if (ret != -ENOENT) {
5624 spin_lock_irqsave(&ctx->completion_lock, flags);
5625 goto done;
5626 }
5627
5628 spin_lock_irqsave(&ctx->completion_lock, flags);
5629 ret = io_timeout_cancel(ctx, sqe_addr);
5630 if (ret != -ENOENT)
5631 goto done;
5632 ret = io_poll_cancel(ctx, sqe_addr);
5633done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005634 if (!ret)
5635 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005636 io_cqring_fill_event(req, ret);
5637 io_commit_cqring(ctx);
5638 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5639 io_cqring_ev_posted(ctx);
5640
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005641 if (ret < 0)
5642 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005643 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005644}
5645
Jens Axboe3529d8c2019-12-19 18:24:38 -07005646static int io_async_cancel_prep(struct io_kiocb *req,
5647 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005648{
Jens Axboefbf23842019-12-17 18:45:56 -07005649 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005650 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005651 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5652 return -EINVAL;
5653 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005654 return -EINVAL;
5655
Jens Axboefbf23842019-12-17 18:45:56 -07005656 req->cancel.addr = READ_ONCE(sqe->addr);
5657 return 0;
5658}
5659
Pavel Begunkov61e98202021-02-10 00:03:08 +00005660static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005661{
5662 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005663 u64 sqe_addr = req->cancel.addr;
5664 struct io_tctx_node *node;
5665 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005666
Pavel Begunkov58f99372021-03-12 16:25:55 +00005667 /* tasks should wait for their io-wq threads, so safe w/o sync */
5668 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5669 spin_lock_irq(&ctx->completion_lock);
5670 if (ret != -ENOENT)
5671 goto done;
5672 ret = io_timeout_cancel(ctx, sqe_addr);
5673 if (ret != -ENOENT)
5674 goto done;
5675 ret = io_poll_cancel(ctx, sqe_addr);
5676 if (ret != -ENOENT)
5677 goto done;
5678 spin_unlock_irq(&ctx->completion_lock);
5679
5680 /* slow path, try all io-wq's */
5681 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5682 ret = -ENOENT;
5683 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5684 struct io_uring_task *tctx = node->task->io_uring;
5685
5686 if (!tctx || !tctx->io_wq)
5687 continue;
5688 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5689 if (ret != -ENOENT)
5690 break;
5691 }
5692 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5693
5694 spin_lock_irq(&ctx->completion_lock);
5695done:
5696 io_cqring_fill_event(req, ret);
5697 io_commit_cqring(ctx);
5698 spin_unlock_irq(&ctx->completion_lock);
5699 io_cqring_ev_posted(ctx);
5700
5701 if (ret < 0)
5702 req_set_fail_links(req);
5703 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005704 return 0;
5705}
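
/*
 * Illustrative userspace sketch of cancelling an earlier request by its
 * user_data, served by io_async_cancel() above (which tries io-wq, then
 * timeouts, then poll). liburing provides a prep helper for this, but its
 * signature has changed across releases, so the call below is an assumption:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel(sqe, (void *)0x1234, 0);
 *	io_uring_submit(&ring);
 *
 *	(0x1234 is the user_data of the request to cancel; the CQE's res field
 *	 is 0, -ENOENT or -EALREADY, matching io_async_cancel_one() above)
 */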
5706
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005707static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005708 const struct io_uring_sqe *sqe)
5709{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005710 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5711 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005712 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5713 return -EINVAL;
5714 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005715 return -EINVAL;
5716
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005717 req->rsrc_update.offset = READ_ONCE(sqe->off);
5718 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5719 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005720 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005721 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005722 return 0;
5723}
5724
Pavel Begunkov889fca72021-02-10 00:03:09 +00005725static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005726{
5727 struct io_ring_ctx *ctx = req->ctx;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005728 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005729 int ret;
5730
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005731 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005732 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005733
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005734 up.offset = req->rsrc_update.offset;
5735 up.data = req->rsrc_update.arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005736
5737 mutex_lock(&ctx->uring_lock);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005738 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005739 mutex_unlock(&ctx->uring_lock);
5740
5741 if (ret < 0)
5742 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005743 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005744 return 0;
5745}
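
/*
 * Illustrative userspace sketch of updating registered file slots from an
 * SQE, handled by io_files_update() above. io_uring_prep_files_update() is
 * assumed from liburing's public API with roughly this shape, not part of
 * this file:
 *
 *	int fds[2] = { new_fd, -1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_files_update(sqe, fds, 2, 0);
 *	io_uring_submit(&ring);
 *
 *	(two slots starting at offset 0 are updated; -1 unregisters a slot;
 *	 the CQE's res field is the number of slots updated or a negative errno)
 */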
5746
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005747static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005748{
Jens Axboed625c6e2019-12-17 19:53:05 -07005749 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005750 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005751 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005752 case IORING_OP_READV:
5753 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005754 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005755 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005756 case IORING_OP_WRITEV:
5757 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005758 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005759 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005760 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005761 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005762 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005763 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005764 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005765 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005766 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005767 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005768 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005769 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005770 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005771 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005772 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005773 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005774 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005775 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005776 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005777 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005778 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005779 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005780 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005781 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005782 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005783 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005784 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005785 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005786 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005787 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005788 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005789 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005790 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005791 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005792 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005793 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005794 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005795 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005796 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005797 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005798 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005799 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005800 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005801 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005802 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005803 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005804 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005805 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005806 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005807 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005808 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005809 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005810 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005811 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005812 case IORING_OP_SHUTDOWN:
5813 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005814 case IORING_OP_RENAMEAT:
5815 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005816 case IORING_OP_UNLINKAT:
5817 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005818 }
5819
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005820 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5821 req->opcode);
5822 return-EINVAL;
5823}
5824
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005825static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005826{
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005827 switch (req->opcode) {
5828 case IORING_OP_READV:
5829 case IORING_OP_READ_FIXED:
5830 case IORING_OP_READ:
5831 return io_rw_prep_async(req, READ);
5832 case IORING_OP_WRITEV:
5833 case IORING_OP_WRITE_FIXED:
5834 case IORING_OP_WRITE:
5835 return io_rw_prep_async(req, WRITE);
5836 case IORING_OP_SENDMSG:
5837 case IORING_OP_SEND:
5838 return io_sendmsg_prep_async(req);
5839 case IORING_OP_RECVMSG:
5840 case IORING_OP_RECV:
5841 return io_recvmsg_prep_async(req);
5842 case IORING_OP_CONNECT:
5843 return io_connect_prep_async(req);
5844 }
5845 return 0;
5846}
5847
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005848static int io_req_defer_prep(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005849{
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005850 if (!io_op_defs[req->opcode].needs_async_data)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005851 return 0;
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005852	/* some opcodes init it during the initial prep */
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005853 if (req->async_data)
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005854 return 0;
5855 if (__io_alloc_async_data(req))
Jens Axboeb76da702019-11-20 13:05:32 -07005856 return -EAGAIN;
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005857 return io_req_prep_async(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005858}
5859
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005860static u32 io_get_sequence(struct io_kiocb *req)
5861{
5862 struct io_kiocb *pos;
5863 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005864 u32 total_submitted, nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005865
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005866 io_for_each_link(pos, req)
5867 nr_reqs++;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005868
5869 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5870 return total_submitted - nr_reqs;
5871}
5872
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005873static int io_req_defer(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005874{
5875 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005876 struct io_defer_entry *de;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005877 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005878 u32 seq;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005879
5880 /* Still need defer if there is pending req in defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005881 if (likely(list_empty_careful(&ctx->defer_list) &&
5882 !(req->flags & REQ_F_IO_DRAIN)))
5883 return 0;
5884
5885 seq = io_get_sequence(req);
5886 /* Still a chance to pass the sequence check */
5887 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboe2b188cc2019-01-07 10:46:33 -07005888 return 0;
5889
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005890 ret = io_req_defer_prep(req);
5891 if (ret)
5892 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03005893 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005894 de = kmalloc(sizeof(*de), GFP_KERNEL);
5895 if (!de)
5896 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07005897
5898 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005899 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07005900 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005901 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03005902 io_queue_async_work(req);
5903 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07005904 }
5905
5906 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005907 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005908 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005909 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07005910 spin_unlock_irq(&ctx->completion_lock);
5911 return -EIOCBQUEUED;
5912}
Jens Axboeedafcce2019-01-09 09:16:05 -07005913
Pavel Begunkov3ca405e2020-07-13 23:37:08 +03005914static void __io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005915{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005916 if (req->flags & REQ_F_BUFFER_SELECTED) {
5917 switch (req->opcode) {
5918 case IORING_OP_READV:
5919 case IORING_OP_READ_FIXED:
5920 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005921 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005922 break;
5923 case IORING_OP_RECVMSG:
5924 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07005925 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005926 break;
5927 }
5928 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005929 }
5930
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005931 if (req->flags & REQ_F_NEED_CLEANUP) {
5932 switch (req->opcode) {
5933 case IORING_OP_READV:
5934 case IORING_OP_READ_FIXED:
5935 case IORING_OP_READ:
5936 case IORING_OP_WRITEV:
5937 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005938 case IORING_OP_WRITE: {
5939 struct io_async_rw *io = req->async_data;
5940 if (io->free_iovec)
5941 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005942 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005943 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005944 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005945 case IORING_OP_SENDMSG: {
5946 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005947
5948 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005949 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005950 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005951 case IORING_OP_SPLICE:
5952 case IORING_OP_TEE:
5953 io_put_file(req, req->splice.file_in,
5954 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5955 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06005956 case IORING_OP_OPENAT:
5957 case IORING_OP_OPENAT2:
5958 if (req->open.filename)
5959 putname(req->open.filename);
5960 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06005961 case IORING_OP_RENAMEAT:
5962 putname(req->rename.oldpath);
5963 putname(req->rename.newpath);
5964 break;
Jens Axboe14a11432020-09-28 14:27:37 -06005965 case IORING_OP_UNLINKAT:
5966 putname(req->unlink.filename);
5967 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005968 }
5969 req->flags &= ~REQ_F_NEED_CLEANUP;
5970 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005971}
5972
Pavel Begunkov889fca72021-02-10 00:03:09 +00005973static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07005974{
Jens Axboeedafcce2019-01-09 09:16:05 -07005975 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07005976 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07005977 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07005978
Jens Axboe003e8dc2021-03-06 09:22:27 -07005979 if (req->work.creds && req->work.creds != current_cred())
5980 creds = override_creds(req->work.creds);
Jens Axboe5730b272021-02-27 15:57:30 -07005981
Jens Axboed625c6e2019-12-17 19:53:05 -07005982 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07005983 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00005984 ret = io_nop(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07005985 break;
5986 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005987 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005988 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00005989 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005990 break;
5991 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07005992 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005993 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00005994 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005995 break;
5996 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005997 ret = io_fsync(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005998 break;
5999 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006000 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006001 break;
6002 case IORING_OP_POLL_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006003 ret = io_poll_remove(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006004 break;
6005 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006006 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006007 break;
6008 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006009 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006010 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006011 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006012 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006013 break;
6014 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006015 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006016 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006017 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006018 ret = io_recv(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006019 break;
6020 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006021 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006022 break;
6023 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006024 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006025 break;
6026 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006027 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006028 break;
6029 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006030 ret = io_connect(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006031 break;
6032 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006033 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006034 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006035 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006036 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006037 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006038 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006039 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006040 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006041 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006042 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006043 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006044 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006045 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006046 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006047 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006048 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006049 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006050 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006051 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006052 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006053 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006054 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006055 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006056 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006057 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006058 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006059 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006060 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006061 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006062 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006063 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006064 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006065 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006066 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006067 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006068 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006069 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006070 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006071 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006072 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006073 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006074 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006075 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006076 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006077 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006078 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006079 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006080 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006081 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006082 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006083 default:
6084 ret = -EINVAL;
6085 break;
Jens Axboe31b51512019-01-18 22:56:34 -07006086 }
6087
Jens Axboe5730b272021-02-27 15:57:30 -07006088 if (creds)
6089 revert_creds(creds);
6090
Jens Axboe2b188cc2019-01-07 10:46:33 -07006091 if (ret)
6092 return ret;
6093
Jens Axboeb5325762020-05-19 21:20:27 -06006094 /* If the op doesn't have a file, we're not polling for it */
6095 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006096 const bool in_async = io_wq_current_is_worker();
6097
Jens Axboe11ba8202020-01-15 21:51:17 -07006098 /* workqueue context doesn't hold uring_lock, grab it now */
6099 if (in_async)
6100 mutex_lock(&ctx->uring_lock);
6101
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08006102 io_iopoll_req_issued(req, in_async);
Jens Axboe11ba8202020-01-15 21:51:17 -07006103
6104 if (in_async)
6105 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006106 }
6107
6108 return 0;
6109}
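/*
 * A rough userspace sketch of how the default -EINVAL branch above shows up
 * for applications: an unknown opcode simply completes with -EINVAL in the
 * CQE. Applications can probe opcode support first; the liburing helper
 * names below are assumed, check the library headers for your version.
 *
 *	struct io_uring ring;
 *	struct io_uring_probe *probe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	probe = io_uring_get_probe_ring(&ring);
 *	if (probe && io_uring_opcode_supported(probe, IORING_OP_OPENAT2))
 *		printf("IORING_OP_OPENAT2 is supported\n");
 *	free(probe);
 *	io_uring_queue_exit(&ring);
 */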
6110
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006111static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006112{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006113 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006114 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006115 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006116
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006117 timeout = io_prep_linked_timeout(req);
6118 if (timeout)
6119 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006120
Jens Axboe4014d942021-01-19 15:53:54 -07006121 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006122 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006123
Jens Axboe561fb042019-10-24 07:25:42 -06006124 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006125 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006126 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006127 /*
6128 * We can get EAGAIN for polled IO even though we're
6129 * forcing a sync submission from here, since we can't
6130 * wait for request slots on the block side.
6131 */
6132 if (ret != -EAGAIN)
6133 break;
6134 cond_resched();
6135 } while (1);
6136 }
Jens Axboe31b51512019-01-18 22:56:34 -07006137
Pavel Begunkova3df76982021-02-18 22:32:52 +00006138 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006139 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006140 /* io-wq is going to take one down */
6141 refcount_inc(&req->refs);
6142 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006143 }
Jens Axboe31b51512019-01-18 22:56:34 -07006144}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006145
Jens Axboe65e19f52019-10-26 07:20:21 -06006146static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6147 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06006148{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006149 struct fixed_rsrc_table *table;
Jens Axboe65e19f52019-10-26 07:20:21 -06006150
Jens Axboe05f3fb32019-12-09 11:22:50 -07006151 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
Xiaoming Ni84695082020-05-11 19:25:43 +08006152 return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06006153}
6154
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006155static struct file *io_file_get(struct io_submit_state *state,
6156 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006157{
6158 struct io_ring_ctx *ctx = req->ctx;
6159 struct file *file;
6160
6161 if (fixed) {
Pavel Begunkov479f5172020-10-10 18:34:07 +01006162 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006163 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006164 fd = array_index_nospec(fd, ctx->nr_user_files);
6165 file = io_file_from_index(ctx, fd);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00006166 io_set_resource_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006167 } else {
6168 trace_io_uring_file_get(ctx, fd);
6169 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006170 }
6171
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00006172 if (file && unlikely(file->f_op == &io_uring_fops))
6173 io_req_track_inflight(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006174 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006175}
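/*
 * A rough userspace sketch of the "fixed" branch in io_file_get(): files
 * registered via IORING_REGISTER_FILES are addressed by index rather than
 * by process fd, with IOSQE_FIXED_FILE set on the sqe. liburing helper
 * names are assumed here, not guaranteed for every library version.
 *
 *	int fds[2] = { open("a", O_RDONLY), open("b", O_RDONLY) };
 *
 *	io_uring_register_files(&ring, fds, 2);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, 1, buf, sizeof(buf), 0);	// fd 1 == index into fds[]
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	io_uring_submit(&ring);
 */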
6176
Jens Axboe2665abf2019-11-05 12:40:47 -07006177static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6178{
Jens Axboead8a48a2019-11-15 08:49:11 -07006179 struct io_timeout_data *data = container_of(timer,
6180 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006181 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006182 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006183 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006184
6185 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006186 prev = req->timeout.head;
6187 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006188
6189 /*
6190	 * We don't expect the list to be empty; that will only happen if we
6191 * race with the completion of the linked work.
6192 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006193 if (prev && refcount_inc_not_zero(&prev->refs))
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006194 io_remove_next_linked(prev);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006195 else
6196 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006197 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6198
6199 if (prev) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006200 req_set_fail_links(prev);
Pavel Begunkov014db002020-03-03 21:33:12 +03006201 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006202 io_put_req_deferred(prev, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006203 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006204 io_req_complete_post(req, -ETIME, 0);
6205 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07006206 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006207 return HRTIMER_NORESTART;
6208}
6209
Jens Axboe7271ef32020-08-10 09:55:22 -06006210static void __io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006211{
Jens Axboe76a46e02019-11-10 23:34:16 -07006212 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006213	 * If the back reference is NULL, then our linked request finished
6214	 * before we got a chance to set up the timer.
Jens Axboe76a46e02019-11-10 23:34:16 -07006215 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006216 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006217 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006218
Jens Axboead8a48a2019-11-15 08:49:11 -07006219 data->timer.function = io_link_timeout_fn;
6220 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6221 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006222 }
Jens Axboe7271ef32020-08-10 09:55:22 -06006223}
6224
6225static void io_queue_linked_timeout(struct io_kiocb *req)
6226{
6227 struct io_ring_ctx *ctx = req->ctx;
6228
6229 spin_lock_irq(&ctx->completion_lock);
6230 __io_queue_linked_timeout(req);
Jens Axboe76a46e02019-11-10 23:34:16 -07006231 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006232
Jens Axboe2665abf2019-11-05 12:40:47 -07006233 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006234 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006235}
6236
Jens Axboead8a48a2019-11-15 08:49:11 -07006237static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006238{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006239 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006240
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006241 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6242 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006243 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006244
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006245 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006246 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006247 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006248 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006249}
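/*
 * A rough userspace sketch of the linked timeout machinery above: userspace
 * places an IORING_OP_LINK_TIMEOUT sqe immediately after the request it
 * guards, with IOSQE_IO_LINK set on that request. liburing helper names are
 * assumed; exact CQE result codes can vary by operation and kernel version.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *	io_uring_submit(&ring);
 *
 * If the read has not completed within one second it gets cancelled and the
 * timeout completes with -ETIME; otherwise the timeout itself is cancelled.
 */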
6250
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006251static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006252{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006253 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006254 int ret;
6255
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006256 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006257
6258 /*
6259 * We async punt it if the file wasn't marked NOWAIT, or if the file
6260 * doesn't support non-blocking read/write attempts
6261 */
Pavel Begunkov24c74672020-06-21 13:09:51 +03006262 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006263 if (!io_arm_poll_handler(req)) {
Pavel Begunkovf063c542020-07-25 14:41:59 +03006264 /*
6265 * Queued up for async execution, worker will release
6266 * submit reference when the iocb is actually submitted.
6267 */
6268 io_queue_async_work(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006269 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006270 } else if (likely(!ret)) {
6271 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006272 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006273 struct io_ring_ctx *ctx = req->ctx;
6274 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006275
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006276 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006277 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006278 io_submit_flush_completions(cs, ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006279 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006280 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006281 }
6282 } else {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006283 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06006284 io_put_req(req);
Pavel Begunkov652532a2020-07-03 22:15:07 +03006285 io_req_complete(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006286 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006287 if (linked_timeout)
6288 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006289}
6290
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006291static void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006292{
6293 int ret;
6294
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006295 ret = io_req_defer(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006296 if (ret) {
6297 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006298fail_req:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006299 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06006300 io_put_req(req);
6301 io_req_complete(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006302 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006303 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006304 ret = io_req_defer_prep(req);
6305 if (unlikely(ret))
6306 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07006307 io_queue_async_work(req);
6308 } else {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006309 __io_queue_sqe(req);
Jens Axboece35a472019-12-17 08:04:44 -07006310 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006311}
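/*
 * A rough userspace sketch of the REQ_F_FORCE_ASYNC branch in io_queue_sqe():
 * it is taken when the application sets IOSQE_ASYNC on the sqe, asking that
 * the request skip the inline non-blocking attempt and go straight to the
 * io-wq workers. liburing helper names are assumed.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_ASYNC;
 *	io_uring_submit(&ring);
 */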
6312
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006313/*
6314 * Check SQE restrictions (opcode and flags).
6315 *
6316 * Returns 'true' if SQE is allowed, 'false' otherwise.
6317 */
6318static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6319 struct io_kiocb *req,
6320 unsigned int sqe_flags)
6321{
6322 if (!ctx->restricted)
6323 return true;
6324
6325 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6326 return false;
6327
6328 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6329 ctx->restrictions.sqe_flags_required)
6330 return false;
6331
6332 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6333 ctx->restrictions.sqe_flags_required))
6334 return false;
6335
6336 return true;
6337}
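/*
 * A rough userspace sketch of how the restriction bits checked above get
 * installed: a ring created with IORING_SETUP_R_DISABLED registers its
 * restrictions and is then enabled. io_uring_register() below stands for the
 * raw syscall; struct/constant names are taken from io_uring.h as assumed
 * for this kernel version.
 *
 *	struct io_uring_restriction res[2] = {};
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
 *	res[1].sqe_flags = IOSQE_FIXED_FILE;
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_RESTRICTIONS, res, 2);
 *	io_uring_register(ring_fd, IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */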
6338
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006339static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006340 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006341{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006342 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006343 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006344 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006345
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006346 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006347	/* same numerical values as the corresponding REQ_F_* flags, safe to copy */
6348 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006349 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006350 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006351 req->file = NULL;
6352 req->ctx = ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006353 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006354 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006355 /* one is dropped after submission, the other at completion */
6356 refcount_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006357 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006358 req->result = 0;
Jens Axboe93e68e02021-03-09 07:02:21 -07006359 req->work.list.next = NULL;
6360 req->work.creds = NULL;
6361 req->work.flags = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006362
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006363 /* enforce forwards compatibility on users */
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006364 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
6365 req->flags = 0;
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006366 return -EINVAL;
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006367 }
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006368
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006369 if (unlikely(req->opcode >= IORING_OP_LAST))
6370 return -EINVAL;
6371
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006372 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6373 return -EACCES;
6374
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006375 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6376 !io_op_defs[req->opcode].buffer_select)
6377 return -EOPNOTSUPP;
6378
Jens Axboe003e8dc2021-03-06 09:22:27 -07006379 personality = READ_ONCE(sqe->personality);
6380 if (personality) {
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00006381 req->work.creds = xa_load(&ctx->personalities, personality);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006382 if (!req->work.creds)
6383 return -EINVAL;
6384 get_cred(req->work.creds);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006385 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006386 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006387
Jens Axboe27926b62020-10-28 09:33:23 -06006388 /*
6389 * Plug now if we have more than 1 IO left after this, and the target
6390 * is potentially a read/write to block based storage.
6391 */
6392 if (!state->plug_started && state->ios_left > 1 &&
6393 io_op_defs[req->opcode].plug) {
6394 blk_start_plug(&state->plug);
6395 state->plug_started = true;
6396 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006397
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006398 if (io_op_defs[req->opcode].needs_file) {
6399 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006400
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006401 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006402 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006403 ret = -EBADF;
6404 }
6405
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006406 state->ios_left--;
6407 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006408}
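/*
 * A rough userspace sketch of the sqe->personality handling in io_init_req():
 * the id comes from a prior IORING_REGISTER_PERSONALITY, which snapshots the
 * caller's credentials so individual sqes can be issued with them. The
 * liburing helper name is assumed.
 *
 *	int pers_id = io_uring_register_personality(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_openat(sqe, AT_FDCWD, "file", O_RDONLY, 0);
 *	sqe->personality = pers_id;
 *	io_uring_submit(&ring);
 */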
6409
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006410static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006411 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006412{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006413 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006414 int ret;
6415
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006416 ret = io_init_req(ctx, req, sqe);
6417 if (unlikely(ret)) {
6418fail_req:
6419 io_put_req(req);
6420 io_req_complete(req, ret);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006421 if (link->head) {
6422 /* fail even hard links since we don't submit */
Pavel Begunkovcf109602021-02-18 18:29:43 +00006423 link->head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006424 io_put_req(link->head);
6425 io_req_complete(link->head, -ECANCELED);
6426 link->head = NULL;
6427 }
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006428 return ret;
6429 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006430 ret = io_req_prep(req, sqe);
6431 if (unlikely(ret))
6432 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006433
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006434 /* don't need @sqe from now on */
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006435 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6436 true, ctx->flags & IORING_SETUP_SQPOLL);
6437
Jens Axboe6c271ce2019-01-10 11:22:30 -07006438 /*
6439 * If we already have a head request, queue this one for async
6440 * submittal once the head completes. If we don't have a head but
6441 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6442 * submitted sync once the chain is complete. If none of those
6443 * conditions are true (normal request), then just queue it.
6444 */
6445 if (link->head) {
6446 struct io_kiocb *head = link->head;
6447
6448 /*
6449	 * Given the sequential execution of a link, draining both sides
6450	 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6451	 * requests in the link. So, it drains the head and the
6452	 * next after the link request. The last one is done via the
6453	 * drain_next flag to persist the effect across calls (see the
	 * illustrative sketch after this function).
6454 */
6455 if (req->flags & REQ_F_IO_DRAIN) {
6456 head->flags |= REQ_F_IO_DRAIN;
6457 ctx->drain_next = 1;
6458 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006459 ret = io_req_defer_prep(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006460 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006461 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006462 trace_io_uring_link(ctx, req, head);
6463 link->last->link = req;
6464 link->last = req;
6465
6466 /* last request of a link, enqueue the link */
6467 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006468 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006469 link->head = NULL;
6470 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006471 } else {
6472 if (unlikely(ctx->drain_next)) {
6473 req->flags |= REQ_F_IO_DRAIN;
6474 ctx->drain_next = 0;
6475 }
6476 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006477 link->head = req;
6478 link->last = req;
6479 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006480 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006481 }
6482 }
6483
6484 return 0;
6485}
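/*
 * A rough userspace sketch for the link handling above: a chain is built by
 * setting IOSQE_IO_LINK on every sqe except the last, and its members then
 * execute sequentially, e.g. write-then-fsync. liburing helper names are
 * assumed.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	io_uring_submit(&ring);
 *
 * Adding IOSQE_IO_DRAIN to a member makes the chain also wait for previously
 * submitted requests, as described in the comment inside io_submit_sqe().
 */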
6486
6487/*
6488 * Batched submission is done, ensure local IO is flushed out.
6489 */
6490static void io_submit_state_end(struct io_submit_state *state,
6491 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006492{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006493 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006494 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006495 if (state->comp.nr)
Jens Axboe9e645e112019-05-10 16:07:28 -06006496 io_submit_flush_completions(&state->comp, ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006497 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006498 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006499 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006500}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006501
Jens Axboe9e645e112019-05-10 16:07:28 -06006502/*
6503 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006504 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006505static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006506 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006507{
6508 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006509 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006510 /* set only head, no need to init link_last in advance */
6511 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006512}
6513
Jens Axboe193155c2020-02-22 23:22:19 -07006514static void io_commit_sqring(struct io_ring_ctx *ctx)
6515{
Jens Axboe75c6a032020-01-28 10:15:23 -07006516 struct io_rings *rings = ctx->rings;
6517
6518 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006519 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006520 * since once we write the new head, the application could
6521 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006522 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006523 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006524}
6525
Jens Axboe9e645e112019-05-10 16:07:28 -06006526/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006527 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006528 * that is mapped by userspace. This means that care needs to be taken to
6529 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006530 * being a good citizen. If members of the sqe are validated and then later
6531 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006532 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006533 */
6534static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006535{
6536 u32 *sq_array = ctx->sq_array;
6537 unsigned head;
6538
6539 /*
6540 * The cached sq head (or cq tail) serves two purposes:
6541 *
6542	 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006543	 *    head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006544 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006545 * though the application is the one updating it.
6546 */
6547 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
6548 if (likely(head < ctx->sq_entries))
6549 return &ctx->sq_sqes[head];
6550
6551 /* drop invalid entries */
Pavel Begunkov711be032020-01-17 03:57:59 +03006552 ctx->cached_sq_dropped++;
6553 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6554 return NULL;
6555}
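/*
 * A rough userspace sketch of the producer side that io_get_sqe() consumes,
 * with the ordering details left out. fill_sqe() is a hypothetical helper;
 * sq_tail, sq_ring_mask, sq_array and sqes are pointers into the
 * IORING_OFF_SQ_RING / IORING_OFF_SQES mappings, and io_uring_enter() stands
 * for the raw syscall (liburing normally hides all of this).
 *
 *	unsigned tail = *sq_tail;
 *	unsigned index = tail & *sq_ring_mask;
 *
 *	fill_sqe(&sqes[index]);		// write the io_uring_sqe itself
 *	sq_array[index] = index;	// publish its slot in the ring
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 *	io_uring_enter(ring_fd, 1, 0, 0, NULL);
 */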
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006556
Jens Axboe0f212202020-09-13 13:09:39 -06006557static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006558{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006559 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006560
Jens Axboec4a2ed72019-11-21 21:01:26 -07006561 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006562 if (test_bit(0, &ctx->sq_check_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006563 if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006564 return -EBUSY;
6565 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006566
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006567 /* make sure SQ entry isn't read before tail */
6568 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006569
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006570 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6571 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006572
Jens Axboed8a6df12020-10-15 16:24:45 -06006573 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006574 refcount_add(nr, &current->usage);
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006575 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006576
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006577 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006578 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006579 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006580
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006581 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006582 if (unlikely(!req)) {
6583 if (!submitted)
6584 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006585 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006586 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006587 sqe = io_get_sqe(ctx);
6588 if (unlikely(!sqe)) {
6589 kmem_cache_free(req_cachep, req);
6590 break;
6591 }
Jens Axboed3656342019-12-18 09:50:26 -07006592 /* will complete beyond this point, count as submitted */
6593 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006594 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006595 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006596 }
6597
Pavel Begunkov9466f432020-01-25 22:34:01 +03006598 if (unlikely(submitted != nr)) {
6599 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006600 struct io_uring_task *tctx = current->io_uring;
6601 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006602
Jens Axboed8a6df12020-10-15 16:24:45 -06006603 percpu_ref_put_many(&ctx->refs, unused);
6604 percpu_counter_sub(&tctx->inflight, unused);
6605 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006606 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006607
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006608 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006609 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6610 io_commit_sqring(ctx);
6611
Jens Axboe6c271ce2019-01-10 11:22:30 -07006612 return submitted;
6613}
6614
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006615static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6616{
6617 /* Tell userspace we may need a wakeup call */
6618 spin_lock_irq(&ctx->completion_lock);
6619 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6620 spin_unlock_irq(&ctx->completion_lock);
6621}
6622
6623static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6624{
6625 spin_lock_irq(&ctx->completion_lock);
6626 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6627 spin_unlock_irq(&ctx->completion_lock);
6628}
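/*
 * A rough userspace sketch of the consumer of the IORING_SQ_NEED_WAKEUP flag
 * toggled by the two helpers above: an IORING_SETUP_SQPOLL application checks
 * it after updating the SQ tail and kicks the poll thread when it is set.
 * io_uring_enter() stands for the raw syscall here; liburing performs the
 * same check internally.
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */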
6629
Xiaoguang Wang08369242020-11-03 14:15:59 +08006630static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006631{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006632 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006633 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006634
Jens Axboec8d1ba52020-09-14 11:07:26 -06006635 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006636 /* if we're handling multiple rings, cap submit size for fairness */
6637 if (cap_entries && to_submit > 8)
6638 to_submit = 8;
6639
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006640 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6641 unsigned nr_events = 0;
6642
Xiaoguang Wang08369242020-11-03 14:15:59 +08006643 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006644 if (!list_empty(&ctx->iopoll_list))
6645 io_do_iopoll(ctx, &nr_events, 0);
6646
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006647 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6648 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006649 ret = io_submit_sqes(ctx, to_submit);
6650 mutex_unlock(&ctx->uring_lock);
6651 }
Jens Axboe90554202020-09-03 12:12:41 -06006652
6653 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6654 wake_up(&ctx->sqo_sq_wait);
6655
Xiaoguang Wang08369242020-11-03 14:15:59 +08006656 return ret;
6657}
6658
6659static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6660{
6661 struct io_ring_ctx *ctx;
6662 unsigned sq_thread_idle = 0;
6663
6664 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6665 if (sq_thread_idle < ctx->sq_thread_idle)
6666 sq_thread_idle = ctx->sq_thread_idle;
6667 }
6668
6669 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006670}
6671
Jens Axboe6c271ce2019-01-10 11:22:30 -07006672static int io_sq_thread(void *data)
6673{
Jens Axboe69fb2132020-09-14 11:16:23 -06006674 struct io_sq_data *sqd = data;
6675 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006676 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006677 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006678 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006679
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006680 sprintf(buf, "iou-sqp-%d", sqd->task_pid);
6681 set_task_comm(current, buf);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006682 current->pf_io_worker = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006683
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006684 if (sqd->sq_cpu != -1)
6685 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6686 else
6687 set_cpus_allowed_ptr(current, cpu_online_mask);
6688 current->flags |= PF_NO_SETAFFINITY;
6689
Jens Axboe05962f92021-03-06 13:58:48 -07006690 down_read(&sqd->rw_lock);
6691
6692 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006693 int ret;
6694 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006695
Jens Axboe05962f92021-03-06 13:58:48 -07006696 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
6697 up_read(&sqd->rw_lock);
6698 cond_resched();
6699 down_read(&sqd->rw_lock);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006700 io_run_task_work();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006701 timeout = jiffies + sqd->sq_thread_idle;
Pavel Begunkov7d41e852021-03-10 13:13:54 +00006702 continue;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006703 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006704 if (fatal_signal_pending(current))
6705 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006706 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06006707 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006708 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006709 const struct cred *creds = NULL;
6710
6711 if (ctx->sq_creds != current_cred())
6712 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006713 ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006714 if (creds)
6715 revert_creds(creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006716 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6717 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006718 }
6719
Xiaoguang Wang08369242020-11-03 14:15:59 +08006720 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006721 io_run_task_work();
6722 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006723 if (sqt_spin)
6724 timeout = jiffies + sqd->sq_thread_idle;
6725 continue;
6726 }
6727
Xiaoguang Wang08369242020-11-03 14:15:59 +08006728 needs_sched = true;
6729 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6730 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6731 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6732 !list_empty_careful(&ctx->iopoll_list)) {
6733 needs_sched = false;
6734 break;
6735 }
6736 if (io_sqring_entries(ctx)) {
6737 needs_sched = false;
6738 break;
6739 }
6740 }
6741
Jens Axboe05962f92021-03-06 13:58:48 -07006742 if (needs_sched && !test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
Jens Axboe69fb2132020-09-14 11:16:23 -06006743 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6744 io_ring_set_wakeup_flag(ctx);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006745
Jens Axboe05962f92021-03-06 13:58:48 -07006746 up_read(&sqd->rw_lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006747 schedule();
Jens Axboe16efa4f2021-03-12 20:26:13 -07006748 try_to_freeze();
Jens Axboe05962f92021-03-06 13:58:48 -07006749 down_read(&sqd->rw_lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006750 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6751 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006752 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006753
6754 finish_wait(&sqd->wait, &wait);
6755 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006756 }
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006757 up_read(&sqd->rw_lock);
6758 down_write(&sqd->rw_lock);
6759 /*
6760	 * Someone may have parked and added a cancellation task_work; run
6761	 * it first because we don't want it in io_uring_cancel_sqpoll().
6762 */
6763 io_run_task_work();
Jens Axboe6c271ce2019-01-10 11:22:30 -07006764
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006765 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6766 io_uring_cancel_sqpoll(ctx);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006767 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006768 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006769 io_ring_set_wakeup_flag(ctx);
Jens Axboe05962f92021-03-06 13:58:48 -07006770 up_write(&sqd->rw_lock);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006771
6772 io_run_task_work();
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006773 complete(&sqd->exited);
6774 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006775}
6776
Jens Axboebda52162019-09-24 13:47:15 -06006777struct io_wait_queue {
6778 struct wait_queue_entry wq;
6779 struct io_ring_ctx *ctx;
6780 unsigned to_wait;
6781 unsigned nr_timeouts;
6782};
6783
Pavel Begunkov6c503152021-01-04 20:36:36 +00006784static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006785{
6786 struct io_ring_ctx *ctx = iowq->ctx;
6787
6788 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006789 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006790 * started waiting. For timeouts, we always want to return to userspace,
6791 * regardless of event count.
6792 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006793 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006794 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6795}
6796
6797static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6798 int wake_flags, void *key)
6799{
6800 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6801 wq);
6802
Pavel Begunkov6c503152021-01-04 20:36:36 +00006803 /*
6804	 * Cannot safely flush overflowed CQEs from here; ensure we wake up
6805	 * the task, and the next invocation will do it.
6806 */
6807 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6808 return autoremove_wake_function(curr, mode, wake_flags, key);
6809 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006810}
6811
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006812static int io_run_task_work_sig(void)
6813{
6814 if (io_run_task_work())
6815 return 1;
6816 if (!signal_pending(current))
6817 return 0;
Jens Axboe792ee0f62020-10-22 20:17:18 -06006818 if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
6819 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006820 return -EINTR;
6821}
6822
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006823/* when this returns > 0, the caller should retry */
6824static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6825 struct io_wait_queue *iowq,
6826 signed long *timeout)
6827{
6828 int ret;
6829
6830 /* make sure we run task_work before checking for signals */
6831 ret = io_run_task_work_sig();
6832 if (ret || io_should_wake(iowq))
6833 return ret;
6834 /* let the caller flush overflows, retry */
6835 if (test_bit(0, &ctx->cq_check_overflow))
6836 return 1;
6837
6838 *timeout = schedule_timeout(*timeout);
6839 return !*timeout ? -ETIME : 1;
6840}
6841
Jens Axboe2b188cc2019-01-07 10:46:33 -07006842/*
6843 * Wait until events become available, if we don't already have some. The
6844 * application must reap them itself, as they reside on the shared cq ring.
6845 */
6846static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08006847 const sigset_t __user *sig, size_t sigsz,
6848 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006849{
Jens Axboebda52162019-09-24 13:47:15 -06006850 struct io_wait_queue iowq = {
6851 .wq = {
6852 .private = current,
6853 .func = io_wake_function,
6854 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6855 },
6856 .ctx = ctx,
6857 .to_wait = min_events,
6858 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006859 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006860 signed long timeout = MAX_SCHEDULE_TIMEOUT;
6861 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006862
Jens Axboeb41e9852020-02-17 09:52:41 -07006863 do {
Pavel Begunkov6c503152021-01-04 20:36:36 +00006864 io_cqring_overflow_flush(ctx, false, NULL, NULL);
6865 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07006866 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06006867 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07006868 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07006869 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006870
6871 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006872#ifdef CONFIG_COMPAT
6873 if (in_compat_syscall())
6874 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006875 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006876 else
6877#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006878 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006879
Jens Axboe2b188cc2019-01-07 10:46:33 -07006880 if (ret)
6881 return ret;
6882 }
6883
Hao Xuc73ebb62020-11-03 10:54:37 +08006884 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006885 struct timespec64 ts;
6886
Hao Xuc73ebb62020-11-03 10:54:37 +08006887 if (get_timespec64(&ts, uts))
6888 return -EFAULT;
6889 timeout = timespec64_to_jiffies(&ts);
6890 }
6891
Jens Axboebda52162019-09-24 13:47:15 -06006892 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006893 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006894 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07006895 /* if we can't even flush overflow, don't wait for more */
6896 if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
6897 ret = -EBUSY;
6898 break;
6899 }
Jens Axboebda52162019-09-24 13:47:15 -06006900 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6901 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006902 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
6903 finish_wait(&ctx->wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07006904 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006905 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06006906
Jens Axboeb7db41c2020-07-04 08:55:50 -06006907 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006908
Hristo Venev75b28af2019-08-26 17:23:46 +00006909 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006910}
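/*
 * A rough userspace sketch of the wait that io_cqring_wait() backs: a
 * blocking io_uring_enter(..., IORING_ENTER_GETEVENTS) call, usually issued
 * through liburing. Helper names are assumed and handle() is a hypothetical
 * application callback.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_submit(&ring);
 *	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
 *		handle(cqe->res, io_uring_cqe_get_data(cqe));
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 *
 * io_uring_wait_cqe_timeout() roughly corresponds to the uts argument
 * handled above on kernels that support it.
 */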
6911
Jens Axboe6b063142019-01-10 22:13:58 -07006912static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6913{
6914#if defined(CONFIG_UNIX)
6915 if (ctx->ring_sock) {
6916 struct sock *sock = ctx->ring_sock->sk;
6917 struct sk_buff *skb;
6918
6919 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6920 kfree_skb(skb);
6921 }
6922#else
6923 int i;
6924
Jens Axboe65e19f52019-10-26 07:20:21 -06006925 for (i = 0; i < ctx->nr_user_files; i++) {
6926 struct file *file;
6927
6928 file = io_file_from_index(ctx, i);
6929 if (file)
6930 fput(file);
6931 }
Jens Axboe6b063142019-01-10 22:13:58 -07006932#endif
6933}
6934
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00006935static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006936{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006937 struct fixed_rsrc_data *data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006938
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006939 data = container_of(ref, struct fixed_rsrc_data, refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006940 complete(&data->done);
6941}
6942
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006943static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00006944{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006945 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00006946}
6947
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006948static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07006949{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006950 spin_unlock_bh(&ctx->rsrc_ref_lock);
6951}
6952
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00006953static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
6954 struct fixed_rsrc_data *rsrc_data,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006955 struct fixed_rsrc_ref_node *ref_node)
Jens Axboe6b063142019-01-10 22:13:58 -07006956{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006957 io_rsrc_ref_lock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006958 rsrc_data->node = ref_node;
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00006959 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006960 io_rsrc_ref_unlock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006961 percpu_ref_get(&rsrc_data->refs);
Jens Axboe6b063142019-01-10 22:13:58 -07006962}
6963
Hao Xu8bad28d2021-02-19 17:19:36 +08006964static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_data *data)
Jens Axboe6b063142019-01-10 22:13:58 -07006965{
Hao Xu8bad28d2021-02-19 17:19:36 +08006966 struct fixed_rsrc_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06006967
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006968 io_rsrc_ref_lock(ctx);
Pavel Begunkov1e5d7702020-11-18 14:56:25 +00006969 ref_node = data->node;
Pavel Begunkove6cb0072021-02-20 18:03:47 +00006970 data->node = NULL;
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006971 io_rsrc_ref_unlock(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006972 if (ref_node)
6973 percpu_ref_kill(&ref_node->refs);
Hao Xu8bad28d2021-02-19 17:19:36 +08006974}
Xiaoguang Wang05589552020-03-31 14:05:18 +08006975
Hao Xu8bad28d2021-02-19 17:19:36 +08006976static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
6977 struct io_ring_ctx *ctx,
Pavel Begunkovf2303b12021-02-20 18:03:49 +00006978 void (*rsrc_put)(struct io_ring_ctx *ctx,
6979 struct io_rsrc_put *prsrc))
Hao Xu8bad28d2021-02-19 17:19:36 +08006980{
Pavel Begunkovf2303b12021-02-20 18:03:49 +00006981 struct fixed_rsrc_ref_node *backup_node;
Hao Xu8bad28d2021-02-19 17:19:36 +08006982 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08006983
Hao Xu8bad28d2021-02-19 17:19:36 +08006984 if (data->quiesce)
6985 return -ENXIO;
6986
6987 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00006988 do {
Pavel Begunkovf2303b12021-02-20 18:03:49 +00006989 ret = -ENOMEM;
6990 backup_node = alloc_fixed_rsrc_ref_node(ctx);
6991 if (!backup_node)
6992 break;
6993 backup_node->rsrc_data = data;
6994 backup_node->rsrc_put = rsrc_put;
6995
Hao Xu8bad28d2021-02-19 17:19:36 +08006996 io_sqe_rsrc_kill_node(ctx, data);
6997 percpu_ref_kill(&data->refs);
6998 flush_delayed_work(&ctx->rsrc_put_work);
6999
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007000 ret = wait_for_completion_interruptible(&data->done);
7001 if (!ret)
7002 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007003
Jens Axboecb5e1b82021-02-25 07:37:35 -07007004 percpu_ref_resurrect(&data->refs);
Hao Xu8bad28d2021-02-19 17:19:36 +08007005 io_sqe_rsrc_set_node(ctx, data, backup_node);
7006 backup_node = NULL;
Jens Axboecb5e1b82021-02-25 07:37:35 -07007007 reinit_completion(&data->done);
Hao Xu8bad28d2021-02-19 17:19:36 +08007008 mutex_unlock(&ctx->uring_lock);
7009 ret = io_run_task_work_sig();
7010 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007011 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007012 data->quiesce = false;
7013
7014 if (backup_node)
7015 destroy_fixed_rsrc_ref_node(backup_node);
7016 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007017}
7018
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007019static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7020{
7021 struct fixed_rsrc_data *data;
7022
7023 data = kzalloc(sizeof(*data), GFP_KERNEL);
7024 if (!data)
7025 return NULL;
7026
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007027 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007028 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7029 kfree(data);
7030 return NULL;
7031 }
7032 data->ctx = ctx;
7033 init_completion(&data->done);
7034 return data;
7035}
7036
7037static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7038{
7039 percpu_ref_exit(&data->refs);
7040 kfree(data->table);
7041 kfree(data);
7042}
7043
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007044static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7045{
7046 struct fixed_rsrc_data *data = ctx->file_data;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007047 unsigned nr_tables, i;
7048 int ret;
7049
Hao Xu8bad28d2021-02-19 17:19:36 +08007050 /*
7051	 * percpu_ref_is_dying() is used to stop a parallel files unregister,
7052	 * since we may drop the uring lock later in this function to
7053	 * run task work.
7054 */
7055 if (!data || percpu_ref_is_dying(&data->refs))
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007056 return -ENXIO;
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007057 ret = io_rsrc_ref_quiesce(data, ctx, io_ring_file_put);
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007058 if (ret)
7059 return ret;
7060
Jens Axboe6b063142019-01-10 22:13:58 -07007061 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06007062 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7063 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007064 kfree(data->table[i].files);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007065 free_fixed_rsrc_data(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007066 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007067 ctx->nr_user_files = 0;
7068 return 0;
7069}
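/*
 * A rough userspace sketch of what triggers the teardown above: dropping a
 * previously registered file table, either explicitly or when the ring is
 * destroyed. liburing helper names are assumed.
 *
 *	io_uring_register_files(&ring, fds, nr_fds);
 *	...
 *	io_uring_unregister_files(&ring);	// ends up in io_sqe_files_unregister()
 */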
7070
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007071static void io_sq_thread_unpark(struct io_sq_data *sqd)
Jens Axboe05962f92021-03-06 13:58:48 -07007072 __releases(&sqd->rw_lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007073{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007074 WARN_ON_ONCE(sqd->thread == current);
7075
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007076 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Jens Axboe05962f92021-03-06 13:58:48 -07007077 up_write(&sqd->rw_lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007078}
7079
Jens Axboe86e0d672021-03-05 08:44:39 -07007080static void io_sq_thread_park(struct io_sq_data *sqd)
Jens Axboe05962f92021-03-06 13:58:48 -07007081 __acquires(&sqd->rw_lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007082{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007083 WARN_ON_ONCE(sqd->thread == current);
7084
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007085 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Jens Axboe05962f92021-03-06 13:58:48 -07007086 down_write(&sqd->rw_lock);
7087 /* set again for consistency, in case concurrent parks are happening */
7088 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
7089 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007090 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007091}
7092
7093static void io_sq_thread_stop(struct io_sq_data *sqd)
7094{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007095 WARN_ON_ONCE(sqd->thread == current);
7096
Jens Axboe05962f92021-03-06 13:58:48 -07007097 down_write(&sqd->rw_lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007098 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Jens Axboee8f98f242021-03-09 16:32:13 -07007099 if (sqd->thread)
7100 wake_up_process(sqd->thread);
Jens Axboe05962f92021-03-06 13:58:48 -07007101 up_write(&sqd->rw_lock);
7102 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007103}
7104
Jens Axboe534ca6d2020-09-02 13:52:19 -06007105static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007106{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007107 if (refcount_dec_and_test(&sqd->refs)) {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007108 io_sq_thread_stop(sqd);
7109 kfree(sqd);
7110 }
7111}
7112
7113static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7114{
7115 struct io_sq_data *sqd = ctx->sq_data;
7116
7117 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007118 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007119 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007120 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007121 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007122
7123 io_put_sq_data(sqd);
7124 ctx->sq_data = NULL;
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007125 if (ctx->sq_creds)
7126 put_cred(ctx->sq_creds);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007127 }
7128}
7129
Jens Axboeaa061652020-09-02 14:50:27 -06007130static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7131{
7132 struct io_ring_ctx *ctx_attach;
7133 struct io_sq_data *sqd;
7134 struct fd f;
7135
7136 f = fdget(p->wq_fd);
7137 if (!f.file)
7138 return ERR_PTR(-ENXIO);
7139 if (f.file->f_op != &io_uring_fops) {
7140 fdput(f);
7141 return ERR_PTR(-EINVAL);
7142 }
7143
7144 ctx_attach = f.file->private_data;
7145 sqd = ctx_attach->sq_data;
7146 if (!sqd) {
7147 fdput(f);
7148 return ERR_PTR(-EINVAL);
7149 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007150 if (sqd->task_tgid != current->tgid) {
7151 fdput(f);
7152 return ERR_PTR(-EPERM);
7153 }
Jens Axboeaa061652020-09-02 14:50:27 -06007154
7155 refcount_inc(&sqd->refs);
7156 fdput(f);
7157 return sqd;
7158}
7159
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007160static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7161 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007162{
7163 struct io_sq_data *sqd;
7164
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007165 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007166 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7167 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007168 if (!IS_ERR(sqd)) {
7169 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007170 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007171 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007172 /* fall through for EPERM case, setup new sqd/task */
7173 if (PTR_ERR(sqd) != -EPERM)
7174 return sqd;
7175 }
Jens Axboeaa061652020-09-02 14:50:27 -06007176
Jens Axboe534ca6d2020-09-02 13:52:19 -06007177 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7178 if (!sqd)
7179 return ERR_PTR(-ENOMEM);
7180
7181 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007182 INIT_LIST_HEAD(&sqd->ctx_list);
Jens Axboe05962f92021-03-06 13:58:48 -07007183 init_rwsem(&sqd->rw_lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007184 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007185 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007186 return sqd;
7187}
7188
Jens Axboe6b063142019-01-10 22:13:58 -07007189#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007190/*
7191 * Ensure the UNIX gc is aware of our file set, so we are certain that
7192 * the io_uring can be safely unregistered on process exit, even if we have
7193 * loops in the file referencing.
7194 */
7195static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7196{
7197 struct sock *sk = ctx->ring_sock->sk;
7198 struct scm_fp_list *fpl;
7199 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007200 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007201
Jens Axboe6b063142019-01-10 22:13:58 -07007202 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7203 if (!fpl)
7204 return -ENOMEM;
7205
7206 skb = alloc_skb(0, GFP_KERNEL);
7207 if (!skb) {
7208 kfree(fpl);
7209 return -ENOMEM;
7210 }
7211
7212 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007213
Jens Axboe08a45172019-10-03 08:11:03 -06007214 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007215 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007216 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007217 struct file *file = io_file_from_index(ctx, i + offset);
7218
7219 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007220 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007221 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007222 unix_inflight(fpl->user, fpl->fp[nr_files]);
7223 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007224 }
7225
Jens Axboe08a45172019-10-03 08:11:03 -06007226 if (nr_files) {
7227 fpl->max = SCM_MAX_FD;
7228 fpl->count = nr_files;
7229 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007230 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007231 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7232 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007233
Jens Axboe08a45172019-10-03 08:11:03 -06007234 for (i = 0; i < nr_files; i++)
7235 fput(fpl->fp[i]);
7236 } else {
7237 kfree_skb(skb);
7238 kfree(fpl);
7239 }
Jens Axboe6b063142019-01-10 22:13:58 -07007240
7241 return 0;
7242}
7243
7244/*
7245 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7246 * causes regular reference counting to break down. We rely on the UNIX
7247 * garbage collection to take care of this problem for us.
7248 */
7249static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7250{
7251 unsigned left, total;
7252 int ret = 0;
7253
7254 total = 0;
7255 left = ctx->nr_user_files;
7256 while (left) {
7257 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007258
7259 ret = __io_sqe_files_scm(ctx, this_files, total);
7260 if (ret)
7261 break;
7262 left -= this_files;
7263 total += this_files;
7264 }
7265
7266 if (!ret)
7267 return 0;
7268
7269 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007270 struct file *file = io_file_from_index(ctx, total);
7271
7272 if (file)
7273 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007274 total++;
7275 }
7276
7277 return ret;
7278}
7279#else
7280static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7281{
7282 return 0;
7283}
7284#endif
7285
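/*
 * Allocate the second-level file arrays for each table; on failure any
 * partially allocated tables are freed and 1 is returned.
 */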
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007286static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007287 unsigned nr_tables, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007288{
7289 int i;
7290
7291 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007292 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007293 unsigned this_files;
7294
7295 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7296 table->files = kcalloc(this_files, sizeof(struct file *),
7297 GFP_KERNEL);
7298 if (!table->files)
7299 break;
7300 nr_files -= this_files;
7301 }
7302
7303 if (i == nr_tables)
7304 return 0;
7305
7306 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007307 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007308 kfree(table->files);
7309 }
7310 return 1;
7311}
7312
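/*
 * Drop a fixed file. With CONFIG_UNIX the file may sit in an SCM_RIGHTS skb
 * on the ring socket, so find that entry, take it out of flight and shrink
 * the skb's file array; otherwise just fput() it.
 */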
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007313static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007314{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007315 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007316#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007317 struct sock *sock = ctx->ring_sock->sk;
7318 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7319 struct sk_buff *skb;
7320 int i;
7321
7322 __skb_queue_head_init(&list);
7323
7324 /*
7325 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7326 * remove this entry and rearrange the file array.
7327 */
7328 skb = skb_dequeue(head);
7329 while (skb) {
7330 struct scm_fp_list *fp;
7331
7332 fp = UNIXCB(skb).fp;
7333 for (i = 0; i < fp->count; i++) {
7334 int left;
7335
7336 if (fp->fp[i] != file)
7337 continue;
7338
7339 unix_notinflight(fp->user, fp->fp[i]);
7340 left = fp->count - 1 - i;
7341 if (left) {
7342 memmove(&fp->fp[i], &fp->fp[i + 1],
7343 left * sizeof(struct file *));
7344 }
7345 fp->count--;
7346 if (!fp->count) {
7347 kfree_skb(skb);
7348 skb = NULL;
7349 } else {
7350 __skb_queue_tail(&list, skb);
7351 }
7352 fput(file);
7353 file = NULL;
7354 break;
7355 }
7356
7357 if (!file)
7358 break;
7359
7360 __skb_queue_tail(&list, skb);
7361
7362 skb = skb_dequeue(head);
7363 }
7364
7365 if (skb_peek(&list)) {
7366 spin_lock_irq(&head->lock);
7367 while ((skb = __skb_dequeue(&list)) != NULL)
7368 __skb_queue_tail(head, skb);
7369 spin_unlock_irq(&head->lock);
7370 }
7371#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007372 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007373#endif
7374}
7375
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007376static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007377{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007378 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7379 struct io_ring_ctx *ctx = rsrc_data->ctx;
7380 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007381
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007382 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7383 list_del(&prsrc->list);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007384 ref_node->rsrc_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007385 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007386 }
7387
Xiaoguang Wang05589552020-03-31 14:05:18 +08007388 percpu_ref_exit(&ref_node->refs);
7389 kfree(ref_node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007390 percpu_ref_put(&rsrc_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007391}
7392
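/* Deferred work: drain the rsrc put list and release each retired node. */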
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007393static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007394{
7395 struct io_ring_ctx *ctx;
7396 struct llist_node *node;
7397
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007398 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7399 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007400
7401 while (node) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007402 struct fixed_rsrc_ref_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007403 struct llist_node *next = node->next;
7404
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007405 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7406 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007407 node = next;
7408 }
7409}
7410
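/* Map a fixed file index to its slot in the two-level file table. */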
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007411static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
7412 unsigned i)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007413{
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007414 struct fixed_rsrc_table *table;
7415
7416 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7417 return &table->files[i & IORING_FILE_TABLE_MASK];
7418}
7419
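/*
 * Called when a rsrc node's refs hit zero: mark it done and, preserving the
 * original order, move completed nodes over to the put list for
 * io_rsrc_put_work() to process.
 */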
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007420static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007421{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007422 struct fixed_rsrc_ref_node *ref_node;
7423 struct fixed_rsrc_data *data;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007424 struct io_ring_ctx *ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007425 bool first_add = false;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007426 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007427
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007428 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7429 data = ref_node->rsrc_data;
Pavel Begunkove2978222020-11-18 14:56:26 +00007430 ctx = data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007431
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007432 io_rsrc_ref_lock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007433 ref_node->done = true;
7434
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007435 while (!list_empty(&ctx->rsrc_ref_list)) {
7436 ref_node = list_first_entry(&ctx->rsrc_ref_list,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007437 struct fixed_rsrc_ref_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007438 /* recycle ref nodes in order */
7439 if (!ref_node->done)
7440 break;
7441 list_del(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007442 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007443 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007444 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007445
7446 if (percpu_ref_is_dying(&data->refs))
Jens Axboe4a38aed22020-05-14 17:21:15 -06007447 delay = 0;
7448
Jens Axboe4a38aed22020-05-14 17:21:15 -06007449 if (!delay)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007450 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007451 else if (first_add)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007452 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007453}
7454
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007455static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
Xiaoguang Wang05589552020-03-31 14:05:18 +08007456 struct io_ring_ctx *ctx)
7457{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007458 struct fixed_rsrc_ref_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007459
7460 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7461 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007462 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007463
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007464 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007465 0, GFP_KERNEL)) {
7466 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007467 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007468 }
7469 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007470 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007471 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007472 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007473}
7474
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007475static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7476 struct fixed_rsrc_ref_node *ref_node)
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007477{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007478 ref_node->rsrc_data = ctx->file_data;
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007479 ref_node->rsrc_put = io_ring_file_put;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007480}
7481
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007482static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007483{
7484 percpu_ref_exit(&ref_node->refs);
7485 kfree(ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007486}
7487
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007488
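/*
 * Register a user-supplied array of file descriptors as the fixed file set
 * for this ring. Sparse entries (fd == -1) are allowed.
 */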
Jens Axboe05f3fb32019-12-09 11:22:50 -07007489static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7490 unsigned nr_args)
7491{
7492 __s32 __user *fds = (__s32 __user *) arg;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007493 unsigned nr_tables, i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007494 struct file *file;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007495 int fd, ret = -ENOMEM;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007496 struct fixed_rsrc_ref_node *ref_node;
7497 struct fixed_rsrc_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007498
7499 if (ctx->file_data)
7500 return -EBUSY;
7501 if (!nr_args)
7502 return -EINVAL;
7503 if (nr_args > IORING_MAX_FIXED_FILES)
7504 return -EMFILE;
7505
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007506 file_data = alloc_fixed_rsrc_data(ctx);
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007507 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007508 return -ENOMEM;
Dan Carpenter13770a72021-02-01 15:23:42 +03007509 ctx->file_data = file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007510
7511 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
Colin Ian King035fbaf2020-10-12 15:03:41 +01007512 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007513 GFP_KERNEL);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007514 if (!file_data->table)
7515 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007516
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007517 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
Jens Axboe05f3fb32019-12-09 11:22:50 -07007518 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007519
Jens Axboe05f3fb32019-12-09 11:22:50 -07007520 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007521 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7522 ret = -EFAULT;
7523 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007524 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007525 /* allow sparse sets */
7526 if (fd == -1)
7527 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007528
Jens Axboe05f3fb32019-12-09 11:22:50 -07007529 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007530 ret = -EBADF;
7531 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007532 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007533
7534 /*
7535 * Don't allow io_uring instances to be registered. If UNIX
7536 * isn't enabled, then this causes a reference cycle and this
7537 * instance can never get freed. If UNIX is enabled we'll
7538 * handle it just fine, but there's still no point in allowing
7539 * a ring fd as it doesn't support regular read/write anyway.
7540 */
7541 if (file->f_op == &io_uring_fops) {
7542 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007543 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007544 }
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007545 *io_fixed_file_slot(file_data, i) = file;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007546 }
7547
Jens Axboe05f3fb32019-12-09 11:22:50 -07007548 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007549 if (ret) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07007550 io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007551 return ret;
7552 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007553
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007554 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007555 if (!ref_node) {
Xiaoguang Wang05589552020-03-31 14:05:18 +08007556 io_sqe_files_unregister(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007557 return -ENOMEM;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007558 }
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007559 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007560
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007561 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007562 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007563out_fput:
7564 for (i = 0; i < ctx->nr_user_files; i++) {
7565 file = io_file_from_index(ctx, i);
7566 if (file)
7567 fput(file);
7568 }
7569 for (i = 0; i < nr_tables; i++)
7570 kfree(file_data->table[i].files);
7571 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007572out_free:
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007573 free_fixed_rsrc_data(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007574 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007575 return ret;
7576}
7577
Jens Axboec3a31e62019-10-03 13:59:56 -06007578static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7579 int index)
7580{
7581#if defined(CONFIG_UNIX)
7582 struct sock *sock = ctx->ring_sock->sk;
7583 struct sk_buff_head *head = &sock->sk_receive_queue;
7584 struct sk_buff *skb;
7585
7586 /*
7587 * See if we can merge this file into an existing skb SCM_RIGHTS
7588 * file set. If there's no room, fall back to allocating a new skb
7589 * and filling it in.
7590 */
7591 spin_lock_irq(&head->lock);
7592 skb = skb_peek(head);
7593 if (skb) {
7594 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7595
7596 if (fpl->count < SCM_MAX_FD) {
7597 __skb_unlink(skb, head);
7598 spin_unlock_irq(&head->lock);
7599 fpl->fp[fpl->count] = get_file(file);
7600 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7601 fpl->count++;
7602 spin_lock_irq(&head->lock);
7603 __skb_queue_head(head, skb);
7604 } else {
7605 skb = NULL;
7606 }
7607 }
7608 spin_unlock_irq(&head->lock);
7609
7610 if (skb) {
7611 fput(file);
7612 return 0;
7613 }
7614
7615 return __io_sqe_files_scm(ctx, 1, index);
7616#else
7617 return 0;
7618#endif
7619}
7620
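/* Queue a resource for deferred put once the current rsrc node is retired. */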
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007621static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007622{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007623 struct io_rsrc_put *prsrc;
7624 struct fixed_rsrc_ref_node *ref_node = data->node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007625
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007626 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7627 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007628 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007629
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007630 prsrc->rsrc = rsrc;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007631 list_add(&prsrc->list, &ref_node->rsrc_list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007632
Hillf Dantona5318d32020-03-23 17:47:15 +08007633 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007634}
7635
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007636static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7637 struct file *file)
7638{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007639 return io_queue_rsrc_removal(data, (void *)file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007640}
7641
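/*
 * Apply a fixed-file update: for each slot, queue any existing file for
 * deferred removal and install the new one (or leave the slot empty).
 */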
Jens Axboe05f3fb32019-12-09 11:22:50 -07007642static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007643 struct io_uring_rsrc_update *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007644 unsigned nr_args)
7645{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007646 struct fixed_rsrc_data *data = ctx->file_data;
7647 struct fixed_rsrc_ref_node *ref_node;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007648 struct file *file, **file_slot;
Jens Axboec3a31e62019-10-03 13:59:56 -06007649 __s32 __user *fds;
7650 int fd, i, err;
7651 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007652 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007653
Jens Axboe05f3fb32019-12-09 11:22:50 -07007654 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007655 return -EOVERFLOW;
7656 if (done > ctx->nr_user_files)
7657 return -EINVAL;
7658
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007659 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007660 if (!ref_node)
7661 return -ENOMEM;
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007662 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007663
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007664 fds = u64_to_user_ptr(up->data);
Pavel Begunkov67973b92021-01-26 13:51:09 +00007665 for (done = 0; done < nr_args; done++) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007666 err = 0;
7667 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7668 err = -EFAULT;
7669 break;
7670 }
noah4e0377a2021-01-26 15:23:28 -05007671 if (fd == IORING_REGISTER_FILES_SKIP)
7672 continue;
7673
Pavel Begunkov67973b92021-01-26 13:51:09 +00007674 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007675 file_slot = io_fixed_file_slot(ctx->file_data, i);
7676
7677 if (*file_slot) {
7678 err = io_queue_file_removal(data, *file_slot);
Hillf Dantona5318d32020-03-23 17:47:15 +08007679 if (err)
7680 break;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007681 *file_slot = NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007682 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007683 }
7684 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007685 file = fget(fd);
7686 if (!file) {
7687 err = -EBADF;
7688 break;
7689 }
7690 /*
7691 * Don't allow io_uring instances to be registered. If
7692 * UNIX isn't enabled, then this causes a reference
7693 * cycle and this instance can never get freed. If UNIX
7694 * is enabled we'll handle it just fine, but there's
7695 * still no point in allowing a ring fd as it doesn't
7696 * support regular read/write anyway.
7697 */
7698 if (file->f_op == &io_uring_fops) {
7699 fput(file);
7700 err = -EBADF;
7701 break;
7702 }
Jens Axboee68a3ff2021-02-11 07:45:08 -07007703 *file_slot = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007704 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007705 if (err) {
Jens Axboee68a3ff2021-02-11 07:45:08 -07007706 *file_slot = NULL;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007707 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007708 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007709 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007710 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007711 }
7712
Xiaoguang Wang05589552020-03-31 14:05:18 +08007713 if (needs_switch) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007714 percpu_ref_kill(&data->node->refs);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007715 io_sqe_rsrc_set_node(ctx, data, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007716 } else
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007717 destroy_fixed_rsrc_ref_node(ref_node);
Jens Axboec3a31e62019-10-03 13:59:56 -06007718
7719 return done ? done : err;
7720}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007721
Jens Axboe05f3fb32019-12-09 11:22:50 -07007722static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7723 unsigned nr_args)
7724{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007725 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007726
7727 if (!ctx->file_data)
7728 return -ENXIO;
7729 if (!nr_args)
7730 return -EINVAL;
7731 if (copy_from_user(&up, arg, sizeof(up)))
7732 return -EFAULT;
7733 if (up.resv)
7734 return -EINVAL;
7735
7736 return __io_sqe_files_update(ctx, &up, nr_args);
7737}
Jens Axboec3a31e62019-10-03 13:59:56 -06007738
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007739static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007740{
7741 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7742
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007743 req = io_put_req_find_next(req);
7744 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07007745}
7746
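/* Create the io-wq used for async offload, sharing the ctx's hash map. */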
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007747static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007748{
Jens Axboee9418942021-02-19 12:33:30 -07007749 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007750 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007751 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007752
Jens Axboee9418942021-02-19 12:33:30 -07007753 hash = ctx->hash_map;
7754 if (!hash) {
7755 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7756 if (!hash)
7757 return ERR_PTR(-ENOMEM);
7758 refcount_set(&hash->refs, 1);
7759 init_waitqueue_head(&hash->wait);
7760 ctx->hash_map = hash;
7761 }
7762
7763 data.hash = hash;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007764 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007765 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007766
Jens Axboed25e3a32021-02-16 11:41:41 -07007767 /* Do QD, or 4 * CPUS, whatever is smallest */
7768 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007769
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007770 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007771}
7772
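/* Allocate and initialise the per-task io_uring context, including its io-wq. */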
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007773static int io_uring_alloc_task_context(struct task_struct *task,
7774 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007775{
7776 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007777 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007778
7779 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7780 if (unlikely(!tctx))
7781 return -ENOMEM;
7782
Jens Axboed8a6df12020-10-15 16:24:45 -06007783 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7784 if (unlikely(ret)) {
7785 kfree(tctx);
7786 return ret;
7787 }
7788
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007789 tctx->io_wq = io_init_wq_offload(ctx);
7790 if (IS_ERR(tctx->io_wq)) {
7791 ret = PTR_ERR(tctx->io_wq);
7792 percpu_counter_destroy(&tctx->inflight);
7793 kfree(tctx);
7794 return ret;
7795 }
7796
Jens Axboe0f212202020-09-13 13:09:39 -06007797 xa_init(&tctx->xa);
7798 init_waitqueue_head(&tctx->wait);
7799 tctx->last = NULL;
Jens Axboefdaf0832020-10-30 09:37:30 -06007800 atomic_set(&tctx->in_idle, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007801 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00007802 spin_lock_init(&tctx->task_lock);
7803 INIT_WQ_LIST(&tctx->task_list);
7804 tctx->task_state = 0;
7805 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06007806 return 0;
7807}
7808
7809void __io_uring_free(struct task_struct *tsk)
7810{
7811 struct io_uring_task *tctx = tsk->io_uring;
7812
7813 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007814 WARN_ON_ONCE(tctx->io_wq);
7815
Jens Axboed8a6df12020-10-15 16:24:45 -06007816 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007817 kfree(tctx);
7818 tsk->io_uring = NULL;
7819}
7820
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007821static int io_sq_offload_create(struct io_ring_ctx *ctx,
7822 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007823{
7824 int ret;
7825
Jens Axboed25e3a32021-02-16 11:41:41 -07007826 /* Retain compatibility with failing for an invalid attach attempt */
7827 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7828 IORING_SETUP_ATTACH_WQ) {
7829 struct fd f;
7830
7831 f = fdget(p->wq_fd);
7832 if (!f.file)
7833 return -ENXIO;
7834 if (f.file->f_op != &io_uring_fops) {
7835 fdput(f);
7836 return -EINVAL;
7837 }
7838 fdput(f);
7839 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007840 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07007841 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007842 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007843 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007844
Jens Axboe3ec482d2019-04-08 10:51:01 -06007845 ret = -EPERM;
Jens Axboece59fc62020-09-02 13:28:09 -06007846 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
Jens Axboe3ec482d2019-04-08 10:51:01 -06007847 goto err;
7848
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007849 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007850 if (IS_ERR(sqd)) {
7851 ret = PTR_ERR(sqd);
7852 goto err;
7853 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007854
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007855 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06007856 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06007857 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7858 if (!ctx->sq_thread_idle)
7859 ctx->sq_thread_idle = HZ;
7860
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007861 ret = 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007862 io_sq_thread_park(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007863 /* don't attach to a dying SQPOLL thread, would be racy */
7864 if (attached && !sqd->thread) {
7865 ret = -ENXIO;
7866 } else {
7867 list_add(&ctx->sqd_list, &sqd->ctx_list);
7868 io_sqd_update_thread_idle(sqd);
7869 }
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007870 io_sq_thread_unpark(sqd);
7871
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007872 if (ret < 0) {
7873 io_put_sq_data(sqd);
7874 ctx->sq_data = NULL;
7875 return ret;
7876 } else if (attached) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007877 return 0;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007878 }
Jens Axboeaa061652020-09-02 14:50:27 -06007879
Jens Axboe6c271ce2019-01-10 11:22:30 -07007880 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06007881 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007882
Jens Axboe917257d2019-04-13 09:28:55 -06007883 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06007884 if (cpu >= nr_cpu_ids)
Jens Axboee8f98f242021-03-09 16:32:13 -07007885 goto err_sqpoll;
Shenghui Wang7889f442019-05-07 16:03:19 +08007886 if (!cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07007887 goto err_sqpoll;
Jens Axboe917257d2019-04-13 09:28:55 -06007888
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007889 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007890 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007891 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007892 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007893
7894 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007895 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07007896 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
7897 if (IS_ERR(tsk)) {
7898 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07007899 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007900 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007901
Jens Axboe46fe18b2021-03-04 12:39:36 -07007902 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007903 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07007904 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06007905 if (ret)
7906 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007907 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7908 /* Can't have SQ_AFF without SQPOLL */
7909 ret = -EINVAL;
7910 goto err;
7911 }
7912
Jens Axboe2b188cc2019-01-07 10:46:33 -07007913 return 0;
7914err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007915 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007916 return ret;
Jens Axboee8f98f242021-03-09 16:32:13 -07007917err_sqpoll:
7918 complete(&ctx->sq_data->exited);
7919 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007920}
7921
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007922static inline void __io_unaccount_mem(struct user_struct *user,
7923 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007924{
7925 atomic_long_sub(nr_pages, &user->locked_vm);
7926}
7927
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007928static inline int __io_account_mem(struct user_struct *user,
7929 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007930{
7931 unsigned long page_limit, cur_pages, new_pages;
7932
7933 /* Don't allow more pages than we can safely lock */
7934 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7935
7936 do {
7937 cur_pages = atomic_long_read(&user->locked_vm);
7938 new_pages = cur_pages + nr_pages;
7939 if (new_pages > page_limit)
7940 return -ENOMEM;
7941 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7942 new_pages) != cur_pages);
7943
7944 return 0;
7945}
7946
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007947static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007948{
Jens Axboe62e398b2021-02-21 16:19:37 -07007949 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007950 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007951
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007952 if (ctx->mm_account)
7953 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007954}
7955
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007956static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007957{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007958 int ret;
7959
Jens Axboe62e398b2021-02-21 16:19:37 -07007960 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007961 ret = __io_account_mem(ctx->user, nr_pages);
7962 if (ret)
7963 return ret;
7964 }
7965
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007966 if (ctx->mm_account)
7967 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007968
7969 return 0;
7970}
7971
Jens Axboe2b188cc2019-01-07 10:46:33 -07007972static void io_mem_free(void *ptr)
7973{
Mark Rutland52e04ef2019-04-30 17:30:21 +01007974 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007975
Mark Rutland52e04ef2019-04-30 17:30:21 +01007976 if (!ptr)
7977 return;
7978
7979 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007980 if (put_page_testzero(page))
7981 free_compound_page(page);
7982}
7983
7984static void *io_mem_alloc(size_t size)
7985{
7986 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007987 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007988
7989 return (void *) __get_free_pages(gfp_flags, get_order(size));
7990}
7991
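/*
 * Work out how much memory the rings need; on success *sq_offset is set to
 * where the SQ index array starts within that allocation.
 */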
Hristo Venev75b28af2019-08-26 17:23:46 +00007992static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
7993 size_t *sq_offset)
7994{
7995 struct io_rings *rings;
7996 size_t off, sq_array_size;
7997
7998 off = struct_size(rings, cqes, cq_entries);
7999 if (off == SIZE_MAX)
8000 return SIZE_MAX;
8001
8002#ifdef CONFIG_SMP
8003 off = ALIGN(off, SMP_CACHE_BYTES);
8004 if (off == 0)
8005 return SIZE_MAX;
8006#endif
8007
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008008 if (sq_offset)
8009 *sq_offset = off;
8010
Hristo Venev75b28af2019-08-26 17:23:46 +00008011 sq_array_size = array_size(sizeof(u32), sq_entries);
8012 if (sq_array_size == SIZE_MAX)
8013 return SIZE_MAX;
8014
8015 if (check_add_overflow(off, sq_array_size, &off))
8016 return SIZE_MAX;
8017
Hristo Venev75b28af2019-08-26 17:23:46 +00008018 return off;
8019}
8020
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008021static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008022{
8023 int i, j;
8024
8025 if (!ctx->user_bufs)
8026 return -ENXIO;
8027
8028 for (i = 0; i < ctx->nr_user_bufs; i++) {
8029 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8030
8031 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008032 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07008033
Jens Axboede293932020-09-17 16:19:16 -06008034 if (imu->acct_pages)
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008035 io_unaccount_mem(ctx, imu->acct_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008036 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008037 imu->nr_bvecs = 0;
8038 }
8039
8040 kfree(ctx->user_bufs);
8041 ctx->user_bufs = NULL;
8042 ctx->nr_user_bufs = 0;
8043 return 0;
8044}
8045
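/* Copy a single iovec from userspace, converting from compat layout if needed. */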
8046static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8047 void __user *arg, unsigned index)
8048{
8049 struct iovec __user *src;
8050
8051#ifdef CONFIG_COMPAT
8052 if (ctx->compat) {
8053 struct compat_iovec __user *ciovs;
8054 struct compat_iovec ciov;
8055
8056 ciovs = (struct compat_iovec __user *) arg;
8057 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8058 return -EFAULT;
8059
Jens Axboed55e5f52019-12-11 16:12:15 -07008060 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008061 dst->iov_len = ciov.iov_len;
8062 return 0;
8063 }
8064#endif
8065 src = (struct iovec __user *) arg;
8066 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8067 return -EFAULT;
8068 return 0;
8069}
8070
Jens Axboede293932020-09-17 16:19:16 -06008071/*
8072	 * Not super efficient, but this only happens at registration time. And we do cache
8073 * the last compound head, so generally we'll only do a full search if we don't
8074 * match that one.
8075 *
8076 * We check if the given compound head page has already been accounted, to
8077 * avoid double accounting it. This allows us to account the full size of the
8078 * page, not just the constituent pages of a huge page.
8079 */
8080static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8081 int nr_pages, struct page *hpage)
8082{
8083 int i, j;
8084
8085 /* check current page array */
8086 for (i = 0; i < nr_pages; i++) {
8087 if (!PageCompound(pages[i]))
8088 continue;
8089 if (compound_head(pages[i]) == hpage)
8090 return true;
8091 }
8092
8093 /* check previously registered pages */
8094 for (i = 0; i < ctx->nr_user_bufs; i++) {
8095 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8096
8097 for (j = 0; j < imu->nr_bvecs; j++) {
8098 if (!PageCompound(imu->bvec[j].bv_page))
8099 continue;
8100 if (compound_head(imu->bvec[j].bv_page) == hpage)
8101 return true;
8102 }
8103 }
8104
8105 return false;
8106}
8107
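/*
 * Account the pinned pages for this buffer, counting each compound head
 * only once even if several of its constituent pages were pinned.
 */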
8108static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8109 int nr_pages, struct io_mapped_ubuf *imu,
8110 struct page **last_hpage)
8111{
8112 int i, ret;
8113
8114 for (i = 0; i < nr_pages; i++) {
8115 if (!PageCompound(pages[i])) {
8116 imu->acct_pages++;
8117 } else {
8118 struct page *hpage;
8119
8120 hpage = compound_head(pages[i]);
8121 if (hpage == *last_hpage)
8122 continue;
8123 *last_hpage = hpage;
8124 if (headpage_already_acct(ctx, pages, i, hpage))
8125 continue;
8126 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8127 }
8128 }
8129
8130 if (!imu->acct_pages)
8131 return 0;
8132
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008133 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008134 if (ret)
8135 imu->acct_pages = 0;
8136 return ret;
8137}
8138
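/*
 * Pin the pages backing a single user buffer and describe them in @imu's
 * bvec array; file-backed mappings (other than hugetlbfs) are rejected.
 */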
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008139static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8140 struct io_mapped_ubuf *imu,
8141 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008142{
8143 struct vm_area_struct **vmas = NULL;
8144 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008145 unsigned long off, start, end, ubuf;
8146 size_t size;
8147 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008148
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008149 ubuf = (unsigned long) iov->iov_base;
8150 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8151 start = ubuf >> PAGE_SHIFT;
8152 nr_pages = end - start;
8153
8154 ret = -ENOMEM;
8155
8156 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8157 if (!pages)
8158 goto done;
8159
8160 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8161 GFP_KERNEL);
8162 if (!vmas)
8163 goto done;
8164
8165 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8166 GFP_KERNEL);
8167 if (!imu->bvec)
8168 goto done;
8169
8170 ret = 0;
8171 mmap_read_lock(current->mm);
8172 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8173 pages, vmas);
8174 if (pret == nr_pages) {
8175 /* don't support file backed memory */
8176 for (i = 0; i < nr_pages; i++) {
8177 struct vm_area_struct *vma = vmas[i];
8178
8179 if (vma->vm_file &&
8180 !is_file_hugepages(vma->vm_file)) {
8181 ret = -EOPNOTSUPP;
8182 break;
8183 }
8184 }
8185 } else {
8186 ret = pret < 0 ? pret : -EFAULT;
8187 }
8188 mmap_read_unlock(current->mm);
8189 if (ret) {
8190 /*
8191 * if we did partial map, or found file backed vmas,
8192 * release any pages we did get
8193 */
8194 if (pret > 0)
8195 unpin_user_pages(pages, pret);
8196 kvfree(imu->bvec);
8197 goto done;
8198 }
8199
8200 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8201 if (ret) {
8202 unpin_user_pages(pages, pret);
8203 kvfree(imu->bvec);
8204 goto done;
8205 }
8206
8207 off = ubuf & ~PAGE_MASK;
8208 size = iov->iov_len;
8209 for (i = 0; i < nr_pages; i++) {
8210 size_t vec_len;
8211
8212 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8213 imu->bvec[i].bv_page = pages[i];
8214 imu->bvec[i].bv_len = vec_len;
8215 imu->bvec[i].bv_offset = off;
8216 off = 0;
8217 size -= vec_len;
8218 }
8219 /* store original address for later verification */
8220 imu->ubuf = ubuf;
8221 imu->len = iov->iov_len;
8222 imu->nr_bvecs = nr_pages;
8223 ret = 0;
8224done:
8225 kvfree(pages);
8226 kvfree(vmas);
8227 return ret;
8228}
8229
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008230static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008231{
Jens Axboeedafcce2019-01-09 09:16:05 -07008232 if (ctx->user_bufs)
8233 return -EBUSY;
8234 if (!nr_args || nr_args > UIO_MAXIOV)
8235 return -EINVAL;
8236
8237 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8238 GFP_KERNEL);
8239 if (!ctx->user_bufs)
8240 return -ENOMEM;
8241
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008242 return 0;
8243}
8244
8245static int io_buffer_validate(struct iovec *iov)
8246{
8247 /*
8248 * Don't impose further limits on the size and buffer
8249 * constraints here, we'll -EINVAL later when IO is
8250 * submitted if they are wrong.
8251 */
8252 if (!iov->iov_base || !iov->iov_len)
8253 return -EFAULT;
8254
8255 /* arbitrary limit, but we need something */
8256 if (iov->iov_len > SZ_1G)
8257 return -EFAULT;
8258
8259 return 0;
8260}
8261
8262static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8263 unsigned int nr_args)
8264{
8265 int i, ret;
8266 struct iovec iov;
8267 struct page *last_hpage = NULL;
8268
8269 ret = io_buffers_map_alloc(ctx, nr_args);
8270 if (ret)
8271 return ret;
8272
Jens Axboeedafcce2019-01-09 09:16:05 -07008273 for (i = 0; i < nr_args; i++) {
8274 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
Jens Axboeedafcce2019-01-09 09:16:05 -07008275
8276 ret = io_copy_iov(ctx, &iov, arg, i);
8277 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008278 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008279
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008280 ret = io_buffer_validate(&iov);
8281 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008282 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008283
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008284 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8285 if (ret)
8286 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008287
8288 ctx->nr_user_bufs++;
8289 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008290
8291 if (ret)
8292 io_sqe_buffers_unregister(ctx);
8293
Jens Axboeedafcce2019-01-09 09:16:05 -07008294 return ret;
8295}
8296
Jens Axboe9b402842019-04-11 11:45:41 -06008297static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8298{
8299 __s32 __user *fds = arg;
8300 int fd;
8301
8302 if (ctx->cq_ev_fd)
8303 return -EBUSY;
8304
8305 if (copy_from_user(&fd, fds, sizeof(*fds)))
8306 return -EFAULT;
8307
8308 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8309 if (IS_ERR(ctx->cq_ev_fd)) {
8310 int ret = PTR_ERR(ctx->cq_ev_fd);
8311 ctx->cq_ev_fd = NULL;
8312 return ret;
8313 }
8314
8315 return 0;
8316}
8317
8318static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8319{
8320 if (ctx->cq_ev_fd) {
8321 eventfd_ctx_put(ctx->cq_ev_fd);
8322 ctx->cq_ev_fd = NULL;
8323 return 0;
8324 }
8325
8326 return -ENXIO;
8327}
8328
Jens Axboe5a2e7452020-02-23 16:23:11 -07008329static void io_destroy_buffers(struct io_ring_ctx *ctx)
8330{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008331 struct io_buffer *buf;
8332 unsigned long index;
8333
8334 xa_for_each(&ctx->io_buffers, index, buf)
8335 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008336}
8337
Jens Axboe68e68ee2021-02-13 09:00:02 -07008338static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008339{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008340 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008341
Jens Axboe68e68ee2021-02-13 09:00:02 -07008342 list_for_each_entry_safe(req, nxt, list, compl.list) {
8343 if (tsk && req->task != tsk)
8344 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008345 list_del(&req->compl.list);
8346 kmem_cache_free(req_cachep, req);
8347 }
8348}
8349
Jens Axboe4010fec2021-02-27 15:04:18 -07008350static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008351{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008352 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008353 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008354
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008355 mutex_lock(&ctx->uring_lock);
8356
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008357 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008358 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8359 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008360 submit_state->free_reqs = 0;
8361 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008362
8363 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008364 list_splice_init(&cs->locked_free_list, &cs->free_list);
8365 cs->locked_free_nr = 0;
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008366 spin_unlock_irq(&ctx->completion_lock);
8367
Pavel Begunkove5547d22021-02-23 22:17:20 +00008368 io_req_cache_free(&cs->free_list, NULL);
8369
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008370 mutex_unlock(&ctx->uring_lock);
8371}
8372
Jens Axboe2b188cc2019-01-07 10:46:33 -07008373static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8374{
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008375 /*
8376	 * Some may still use the context even when all refs and requests have been
8377	 * put, and they are free to do so while still holding the uring_lock; see
8378 * __io_req_task_submit(). Wait for them to finish.
8379 */
8380 mutex_lock(&ctx->uring_lock);
8381 mutex_unlock(&ctx->uring_lock);
8382
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008383 io_sq_thread_finish(ctx);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008384 io_sqe_buffers_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008385
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008386 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008387 mmdrop(ctx->mm_account);
8388 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008389 }
Jens Axboedef596e2019-01-09 08:59:42 -07008390
Hao Xu8bad28d2021-02-19 17:19:36 +08008391 mutex_lock(&ctx->uring_lock);
Jens Axboe6b063142019-01-10 22:13:58 -07008392 io_sqe_files_unregister(ctx);
Hao Xu8bad28d2021-02-19 17:19:36 +08008393 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008394 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008395 io_destroy_buffers(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07008396
Jens Axboe2b188cc2019-01-07 10:46:33 -07008397#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008398 if (ctx->ring_sock) {
8399 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008400 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008401 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008402#endif
8403
Hristo Venev75b28af2019-08-26 17:23:46 +00008404 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008405 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008406
8407 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008408 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008409 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008410 if (ctx->hash_map)
8411 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008412 kfree(ctx->cancel_hash);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008413 kfree(ctx);
8414}
8415
8416static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8417{
8418 struct io_ring_ctx *ctx = file->private_data;
8419 __poll_t mask = 0;
8420
8421 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008422 /*
8423 * synchronizes with barrier from wq_has_sleeper call in
8424 * io_commit_cqring
8425 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008426 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008427 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008428 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008429
8430 /*
8431 * Don't flush cqring overflow list here, just do a simple check.
8432	 * Otherwise there could possibly be an ABBA deadlock:
8433	 *      CPU0                    CPU1
8434	 *      ----                    ----
8435	 * lock(&ctx->uring_lock);
8436	 *                              lock(&ep->mtx);
8437	 *                              lock(&ctx->uring_lock);
8438	 * lock(&ep->mtx);
8439	 *
8440	 * Users may get EPOLLIN meanwhile seeing nothing in the cqring; this
8441	 * pushes them to do the flush.
8442 */
8443 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008444 mask |= EPOLLIN | EPOLLRDNORM;
8445
8446 return mask;
8447}
8448
8449static int io_uring_fasync(int fd, struct file *file, int on)
8450{
8451 struct io_ring_ctx *ctx = file->private_data;
8452
8453 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8454}
8455
Yejune Deng0bead8c2020-12-24 11:02:20 +08008456static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008457{
Jens Axboe4379bf82021-02-15 13:40:22 -07008458 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008459
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008460 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008461 if (creds) {
8462 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008463 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008464 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008465
8466 return -EINVAL;
8467}
8468
Pavel Begunkovba50a032021-02-26 15:47:56 +00008469static bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008470{
Pavel Begunkov28c47212021-02-28 22:04:54 +00008471 struct callback_head *work, *next;
Pavel Begunkovba50a032021-02-26 15:47:56 +00008472 bool executed = false;
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008473
8474 do {
Pavel Begunkov28c47212021-02-28 22:04:54 +00008475 work = xchg(&ctx->exit_task_work, NULL);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008476 if (!work)
8477 break;
8478
8479 do {
8480 next = work->next;
8481 work->func(work);
8482 work = next;
8483 cond_resched();
8484 } while (work);
Pavel Begunkovba50a032021-02-26 15:47:56 +00008485 executed = true;
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008486 } while (1);
Pavel Begunkovba50a032021-02-26 15:47:56 +00008487
8488 return executed;
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008489}
8490
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008491struct io_tctx_exit {
8492 struct callback_head task_work;
8493 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008494 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008495};
8496
8497static void io_tctx_exit_cb(struct callback_head *cb)
8498{
8499 struct io_uring_task *tctx = current->io_uring;
8500 struct io_tctx_exit *work;
8501
8502 work = container_of(cb, struct io_tctx_exit, task_work);
8503 /*
8504 * When @in_idle, we're in cancellation and it's racy to remove the
8505	 * node. It'll be removed by the end of cancellation; just ignore it.
8506 */
8507 if (!atomic_read(&tctx->in_idle))
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008508 io_uring_del_task_file((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008509 complete(&work->completion);
8510}
8511
Jens Axboe85faa7b2020-04-09 18:14:00 -06008512static void io_ring_exit_work(struct work_struct *work)
8513{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008514 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008515 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008516 struct io_tctx_exit exit;
8517 struct io_tctx_node *node;
8518 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008519
Jens Axboe56952e92020-06-17 15:00:04 -06008520 /*
8521 * If we're doing polled IO and end up having requests being
8522 * submitted async (out-of-line), then completions can come in while
8523 * we're waiting for refs to drop. We need to reap these manually,
8524 * as nobody else will be looking for them.
8525 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008526 do {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008527 io_uring_try_cancel_requests(ctx, NULL, NULL);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008528
8529 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008530 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008531
8532 mutex_lock(&ctx->uring_lock);
8533 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008534 WARN_ON_ONCE(time_after(jiffies, timeout));
8535
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008536 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8537 ctx_node);
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008538 exit.ctx = ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008539 init_completion(&exit.completion);
8540 init_task_work(&exit.task_work, io_tctx_exit_cb);
8541 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8542 if (WARN_ON_ONCE(ret))
8543 continue;
8544 wake_up_process(node->task);
8545
8546 mutex_unlock(&ctx->uring_lock);
8547 wait_for_completion(&exit.completion);
8548 cond_resched();
8549 mutex_lock(&ctx->uring_lock);
8550 }
8551 mutex_unlock(&ctx->uring_lock);
8552
Jens Axboe85faa7b2020-04-09 18:14:00 -06008553 io_ring_ctx_free(ctx);
8554}
8555
Jens Axboe2b188cc2019-01-07 10:46:33 -07008556static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8557{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008558 unsigned long index;
8559	const struct cred *creds;
8560
Jens Axboe2b188cc2019-01-07 10:46:33 -07008561 mutex_lock(&ctx->uring_lock);
8562 percpu_ref_kill(&ctx->refs);
Pavel Begunkovcda286f2020-12-17 00:24:35 +00008563 /* if force is set, the ring is going away. always drop after that */
8564 ctx->cq_overflow_flushed = 1;
Pavel Begunkov634578f2020-12-06 22:22:44 +00008565 if (ctx->rings)
Pavel Begunkov6c503152021-01-04 20:36:36 +00008566 __io_cqring_overflow_flush(ctx, true, NULL, NULL);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008567 xa_for_each(&ctx->personalities, index, creds)
8568 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008569 mutex_unlock(&ctx->uring_lock);
8570
Pavel Begunkov6b819282020-11-06 13:00:25 +00008571 io_kill_timeouts(ctx, NULL, NULL);
8572 io_poll_remove_all(ctx, NULL, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008573
Jens Axboe15dff282019-11-13 09:09:23 -07008574 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008575 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008576
Jens Axboe85faa7b2020-04-09 18:14:00 -06008577 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008578 /*
8579 * Use system_unbound_wq to avoid spawning tons of event kworkers
8580 * if we're exiting a ton of rings at the same time. It just adds
8581	 * noise and overhead; there's no discernible change in runtime
8582 * over using system_wq.
8583 */
8584 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008585}
8586
8587static int io_uring_release(struct inode *inode, struct file *file)
8588{
8589 struct io_ring_ctx *ctx = file->private_data;
8590
8591 file->private_data = NULL;
8592 io_ring_ctx_wait_and_kill(ctx);
8593 return 0;
8594}
8595
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008596struct io_task_cancel {
8597 struct task_struct *task;
8598 struct files_struct *files;
8599};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008600
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008601static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008602{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008603 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008604 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008605 bool ret;
8606
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008607 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008608 unsigned long flags;
8609 struct io_ring_ctx *ctx = req->ctx;
8610
8611 /* protect against races with linked timeouts */
8612 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008613 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008614 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8615 } else {
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008616 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008617 }
8618 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008619}
8620
Pavel Begunkove1915f72021-03-11 23:29:35 +00008621static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008622 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008623 struct files_struct *files)
8624{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008625 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008626 LIST_HEAD(list);
8627
8628 spin_lock_irq(&ctx->completion_lock);
8629 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00008630 if (io_match_task(de->req, task, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008631 list_cut_position(&list, &ctx->defer_list, &de->list);
8632 break;
8633 }
8634 }
8635 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008636 if (list_empty(&list))
8637 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008638
8639 while (!list_empty(&list)) {
8640 de = list_first_entry(&list, struct io_defer_entry, list);
8641 list_del_init(&de->list);
8642 req_set_fail_links(de->req);
8643 io_put_req(de->req);
8644 io_req_complete(de->req, -ECANCELED);
8645 kfree(de);
8646 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008647 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008648}
8649
Pavel Begunkov1b007642021-03-06 11:02:17 +00008650static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8651{
8652 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8653
8654 return req->ctx == data;
8655}
8656
8657static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8658{
8659 struct io_tctx_node *node;
8660 enum io_wq_cancel cret;
8661 bool ret = false;
8662
8663 mutex_lock(&ctx->uring_lock);
8664 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8665 struct io_uring_task *tctx = node->task->io_uring;
8666
8667 /*
8668 * io_wq will stay alive while we hold uring_lock, because it's
8669	 * killed after ctx nodes, which requires taking the lock.
8670 */
8671 if (!tctx || !tctx->io_wq)
8672 continue;
8673 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8674 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8675 }
8676 mutex_unlock(&ctx->uring_lock);
8677
8678 return ret;
8679}
8680
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008681static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8682 struct task_struct *task,
8683 struct files_struct *files)
8684{
8685 struct io_task_cancel cancel = { .task = task, .files = files, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008686 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008687
8688 while (1) {
8689 enum io_wq_cancel cret;
8690 bool ret = false;
8691
Pavel Begunkov1b007642021-03-06 11:02:17 +00008692 if (!task) {
8693 ret |= io_uring_try_cancel_iowq(ctx);
8694 } else if (tctx && tctx->io_wq) {
8695 /*
8696	 * This cancels requests of all rings, not only @ctx's, but
8697	 * that's fine as the task is in exit/exec.
8698 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008699 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008700 &cancel, true);
8701 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8702 }
8703
8704 /* SQPOLL thread does its own polling */
Jens Axboed052d1d2021-03-11 10:49:20 -07008705 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
8706 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008707 while (!list_empty_careful(&ctx->iopoll_list)) {
8708 io_iopoll_try_reap_events(ctx);
8709 ret = true;
8710 }
8711 }
8712
Pavel Begunkove1915f72021-03-11 23:29:35 +00008713 ret |= io_cancel_defer_files(ctx, task, files);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008714 ret |= io_poll_remove_all(ctx, task, files);
8715 ret |= io_kill_timeouts(ctx, task, files);
8716 ret |= io_run_task_work();
Pavel Begunkovba50a032021-02-26 15:47:56 +00008717 ret |= io_run_ctx_fallback(ctx);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008718 io_cqring_overflow_flush(ctx, true, task, files);
8719 if (!ret)
8720 break;
8721 cond_resched();
8722 }
8723}
8724
Pavel Begunkovca70f002021-01-26 15:28:27 +00008725static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8726 struct task_struct *task,
8727 struct files_struct *files)
8728{
8729 struct io_kiocb *req;
8730 int cnt = 0;
8731
8732 spin_lock_irq(&ctx->inflight_lock);
8733 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8734 cnt += io_match_task(req, task, files);
8735 spin_unlock_irq(&ctx->inflight_lock);
8736 return cnt;
8737}
8738
Pavel Begunkovb52fda02020-11-06 13:00:24 +00008739static void io_uring_cancel_files(struct io_ring_ctx *ctx,
Pavel Begunkovdf9923f2020-11-06 13:00:23 +00008740 struct task_struct *task,
Jens Axboefcb323c2019-10-24 12:39:47 -06008741 struct files_struct *files)
8742{
Jens Axboefcb323c2019-10-24 12:39:47 -06008743 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008744 DEFINE_WAIT(wait);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008745 int inflight;
Jens Axboefcb323c2019-10-24 12:39:47 -06008746
Pavel Begunkovca70f002021-01-26 15:28:27 +00008747 inflight = io_uring_count_inflight(ctx, task, files);
8748 if (!inflight)
Jens Axboefcb323c2019-10-24 12:39:47 -06008749 break;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008750
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008751 io_uring_try_cancel_requests(ctx, task, files);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008752
8753 prepare_to_wait(&task->io_uring->wait, &wait,
8754 TASK_UNINTERRUPTIBLE);
8755 if (inflight == io_uring_count_inflight(ctx, task, files))
8756 schedule();
Pavel Begunkovc98de082020-11-15 12:56:32 +00008757 finish_wait(&task->io_uring->wait, &wait);
Jens Axboe0f212202020-09-13 13:09:39 -06008758 }
Jens Axboe0f212202020-09-13 13:09:39 -06008759}
8760
8761/*
Jens Axboe0f212202020-09-13 13:09:39 -06008762 * Note that this task has used io_uring. We use it for cancelation purposes.
8763 */
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008764static int io_uring_add_task_file(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008765{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008766 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008767 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00008768 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008769
8770 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008771 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008772 if (unlikely(ret))
8773 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008774 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008775 }
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008776 if (tctx->last != ctx) {
8777 void *old = xa_load(&tctx->xa, (unsigned long)ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008778
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008779 if (!old) {
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008780 node = kmalloc(sizeof(*node), GFP_KERNEL);
8781 if (!node)
8782 return -ENOMEM;
8783 node->ctx = ctx;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008784 node->task = current;
8785
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008786 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008787 node, GFP_KERNEL));
Pavel Begunkova528b042020-12-21 18:34:04 +00008788 if (ret) {
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008789 kfree(node);
Pavel Begunkova528b042020-12-21 18:34:04 +00008790 return ret;
8791 }
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008792
8793 mutex_lock(&ctx->uring_lock);
8794 list_add(&node->ctx_node, &ctx->tctx_list);
8795 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06008796 }
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008797 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06008798 }
Jens Axboe0f212202020-09-13 13:09:39 -06008799 return 0;
8800}
8801
8802/*
8803 * Remove this io_uring_file -> task mapping.
8804 */
Pavel Begunkov29412672021-03-06 11:02:11 +00008805static void io_uring_del_task_file(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06008806{
8807 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008808 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00008809
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00008810 if (!tctx)
8811 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008812 node = xa_erase(&tctx->xa, index);
8813 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008814 return;
Jens Axboe0f212202020-09-13 13:09:39 -06008815
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008816 WARN_ON_ONCE(current != node->task);
8817 WARN_ON_ONCE(list_empty(&node->ctx_node));
8818
8819 mutex_lock(&node->ctx->uring_lock);
8820 list_del(&node->ctx_node);
8821 mutex_unlock(&node->ctx->uring_lock);
8822
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008823 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008824 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008825 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06008826}
8827
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008828static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008829{
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008830 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008831 unsigned long index;
8832
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008833 xa_for_each(&tctx->xa, index, node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008834 io_uring_del_task_file(index);
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008835 if (tctx->io_wq) {
8836 io_wq_put_and_exit(tctx->io_wq);
8837 tctx->io_wq = NULL;
8838 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008839}
8840
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008841static s64 tctx_inflight(struct io_uring_task *tctx)
8842{
8843 return percpu_counter_sum(&tctx->inflight);
8844}
8845
8846static void io_sqpoll_cancel_cb(struct callback_head *cb)
8847{
8848 struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
8849 struct io_ring_ctx *ctx = work->ctx;
8850 struct io_sq_data *sqd = ctx->sq_data;
8851
8852 if (sqd->thread)
8853 io_uring_cancel_sqpoll(ctx);
8854 complete(&work->completion);
8855}
8856
8857static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
8858{
8859 struct io_sq_data *sqd = ctx->sq_data;
8860 struct io_tctx_exit work = { .ctx = ctx, };
8861 struct task_struct *task;
8862
8863 io_sq_thread_park(sqd);
8864 list_del_init(&ctx->sqd_list);
8865 io_sqd_update_thread_idle(sqd);
8866 task = sqd->thread;
8867 if (task) {
8868 init_completion(&work.completion);
8869 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
8870 WARN_ON_ONCE(task_work_add(task, &work.task_work, TWA_SIGNAL));
8871 wake_up_process(task);
8872 }
8873 io_sq_thread_unpark(sqd);
8874
8875 if (task)
8876 wait_for_completion(&work.completion);
8877}
8878
Jens Axboe0f212202020-09-13 13:09:39 -06008879void __io_uring_files_cancel(struct files_struct *files)
8880{
8881 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008882 struct io_tctx_node *node;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008883 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06008884
8885 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008886 atomic_inc(&tctx->in_idle);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008887 xa_for_each(&tctx->xa, index, node) {
8888 struct io_ring_ctx *ctx = node->ctx;
8889
8890 if (ctx->sq_data) {
8891 io_sqpoll_cancel_sync(ctx);
8892 continue;
8893 }
8894 io_uring_cancel_files(ctx, current, files);
8895 if (!files)
8896 io_uring_try_cancel_requests(ctx, current, NULL);
8897 }
Jens Axboefdaf0832020-10-30 09:37:30 -06008898 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008899
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008900 if (files)
8901 io_uring_clean_tctx(tctx);
Jens Axboefdaf0832020-10-30 09:37:30 -06008902}
8903
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008904/* should only be called by SQPOLL task */
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008905static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
8906{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008907 struct io_sq_data *sqd = ctx->sq_data;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008908 struct io_uring_task *tctx = current->io_uring;
Jens Axboefdaf0832020-10-30 09:37:30 -06008909 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008910 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008911
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008912 WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
8913
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008914 atomic_inc(&tctx->in_idle);
8915 do {
8916 /* read completions before cancelations */
8917 inflight = tctx_inflight(tctx);
8918 if (!inflight)
8919 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008920 io_uring_try_cancel_requests(ctx, current, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06008921
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008922 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8923 /*
8924 * If we've seen completions, retry without waiting. This
8925 * avoids a race where a completion comes in before we did
8926 * prepare_to_wait().
8927 */
8928 if (inflight == tctx_inflight(tctx))
8929 schedule();
8930 finish_wait(&tctx->wait, &wait);
8931 } while (1);
8932 atomic_dec(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06008933}
8934
Jens Axboe0f212202020-09-13 13:09:39 -06008935/*
8936 * Find any io_uring fd that this task has registered or done IO on, and cancel
8937 * requests.
8938 */
8939void __io_uring_task_cancel(void)
8940{
8941 struct io_uring_task *tctx = current->io_uring;
8942 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06008943 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06008944
8945 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008946 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -06008947 do {
Jens Axboe0f212202020-09-13 13:09:39 -06008948 /* read completions before cancelations */
Jens Axboefdaf0832020-10-30 09:37:30 -06008949 inflight = tctx_inflight(tctx);
Jens Axboed8a6df12020-10-15 16:24:45 -06008950 if (!inflight)
8951 break;
Jens Axboe0f212202020-09-13 13:09:39 -06008952 __io_uring_files_cancel(NULL);
8953
8954 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8955
8956 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00008957 * If we've seen completions, retry without waiting. This
8958 * avoids a race where a completion comes in before we did
8959 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06008960 */
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00008961 if (inflight == tctx_inflight(tctx))
8962 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00008963 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06008964 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06008965
Jens Axboefdaf0832020-10-30 09:37:30 -06008966 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008967
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008968 io_uring_clean_tctx(tctx);
8969 /* all current's requests should be gone, we can kill tctx */
8970 __io_uring_free(current);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008971}
8972
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008973static void *io_uring_validate_mmap_request(struct file *file,
8974 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008975{
Jens Axboe2b188cc2019-01-07 10:46:33 -07008976 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008977 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008978 struct page *page;
8979 void *ptr;
8980
8981 switch (offset) {
8982 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00008983 case IORING_OFF_CQ_RING:
8984 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008985 break;
8986 case IORING_OFF_SQES:
8987 ptr = ctx->sq_sqes;
8988 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008989 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008990 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008991 }
8992
8993 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07008994 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008995 return ERR_PTR(-EINVAL);
8996
8997 return ptr;
8998}
8999
9000#ifdef CONFIG_MMU
9001
9002static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9003{
9004 size_t sz = vma->vm_end - vma->vm_start;
9005 unsigned long pfn;
9006 void *ptr;
9007
9008 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9009 if (IS_ERR(ptr))
9010 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009011
9012 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9013 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9014}
9015
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009016#else /* !CONFIG_MMU */
9017
9018static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9019{
9020 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9021}
9022
9023static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9024{
9025 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9026}
9027
9028static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9029 unsigned long addr, unsigned long len,
9030 unsigned long pgoff, unsigned long flags)
9031{
9032 void *ptr;
9033
9034 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9035 if (IS_ERR(ptr))
9036 return PTR_ERR(ptr);
9037
9038 return (unsigned long) ptr;
9039}
9040
9041#endif /* !CONFIG_MMU */
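/*
 * Illustrative only, not part of this file: a minimal userspace sketch of
 * the mmap()s that io_uring_validate_mmap_request() services, using the
 * offsets reported by io_uring_setup() in struct io_uring_params. Error
 * handling is omitted and IORING_FEAT_SINGLE_MMAP (sharing one mapping
 * for SQ and CQ) is ignored for brevity.
 *
 *	struct io_uring_params p = { 0 };
 *	int fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	void *cq = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  fd, IORING_OFF_SQES);
 */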
9042
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009043static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009044{
9045 DEFINE_WAIT(wait);
9046
9047 do {
9048 if (!io_sqring_full(ctx))
9049 break;
Jens Axboe90554202020-09-03 12:12:41 -06009050 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9051
9052 if (!io_sqring_full(ctx))
9053 break;
Jens Axboe90554202020-09-03 12:12:41 -06009054 schedule();
9055 } while (!signal_pending(current));
9056
9057 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009058 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009059}
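/*
 * Illustrative only, not part of this file: with IORING_SETUP_SQPOLL the
 * SQ ring can fill faster than the poller thread drains it. Instead of
 * busy-looping on a full ring, a submitter can ask to block in the helper
 * above until space frees up:
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *		IORING_ENTER_SQ_WAIT, NULL, 0);
 */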
9060
Hao Xuc73ebb62020-11-03 10:54:37 +08009061static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9062 struct __kernel_timespec __user **ts,
9063 const sigset_t __user **sig)
9064{
9065 struct io_uring_getevents_arg arg;
9066
9067 /*
9068 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9069 * is just a pointer to the sigset_t.
9070 */
9071 if (!(flags & IORING_ENTER_EXT_ARG)) {
9072 *sig = (const sigset_t __user *) argp;
9073 *ts = NULL;
9074 return 0;
9075 }
9076
9077 /*
9078 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9079	 * timespec and sigset_t pointers if the size checks out.
9080 */
9081 if (*argsz != sizeof(arg))
9082 return -EINVAL;
9083 if (copy_from_user(&arg, argp, sizeof(arg)))
9084 return -EFAULT;
9085 *sig = u64_to_user_ptr(arg.sigmask);
9086 *argsz = arg.sigmask_sz;
9087 *ts = u64_to_user_ptr(arg.ts);
9088 return 0;
9089}
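/*
 * Illustrative only, not part of this file: what the EXT_ARG convention
 * parsed above looks like from userspace. Without IORING_ENTER_EXT_ARG
 * the last two arguments are the classic sigset_t pointer and its size;
 * with it, they describe a struct io_uring_getevents_arg carrying both a
 * sigmask and a wait timeout.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= 0,
 *		.sigmask_sz	= 0,
 *		.ts		= (__u64)(uintptr_t)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */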
9090
Jens Axboe2b188cc2019-01-07 10:46:33 -07009091SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009092 u32, min_complete, u32, flags, const void __user *, argp,
9093 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009094{
9095 struct io_ring_ctx *ctx;
9096 long ret = -EBADF;
9097 int submitted = 0;
9098 struct fd f;
9099
Jens Axboe4c6e2772020-07-01 11:29:10 -06009100 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009101
Jens Axboe90554202020-09-03 12:12:41 -06009102 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
Hao Xuc73ebb62020-11-03 10:54:37 +08009103 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009104 return -EINVAL;
9105
9106 f = fdget(fd);
9107 if (!f.file)
9108 return -EBADF;
9109
9110 ret = -EOPNOTSUPP;
9111 if (f.file->f_op != &io_uring_fops)
9112 goto out_fput;
9113
9114 ret = -ENXIO;
9115 ctx = f.file->private_data;
9116 if (!percpu_ref_tryget(&ctx->refs))
9117 goto out_fput;
9118
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009119 ret = -EBADFD;
9120 if (ctx->flags & IORING_SETUP_R_DISABLED)
9121 goto out;
9122
Jens Axboe6c271ce2019-01-10 11:22:30 -07009123 /*
9124 * For SQ polling, the thread will do all submissions and completions.
9125 * Just return the requested submit count, and wake the thread if
9126 * we were asked to.
9127 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009128 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009129 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00009130 io_cqring_overflow_flush(ctx, false, NULL, NULL);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009131
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009132 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009133 if (unlikely(ctx->sq_data->thread == NULL)) {
9134 goto out;
9135 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009136 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009137 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009138 if (flags & IORING_ENTER_SQ_WAIT) {
9139 ret = io_sqpoll_wait_sq(ctx);
9140 if (ret)
9141 goto out;
9142 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009143 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009144 } else if (to_submit) {
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009145 ret = io_uring_add_task_file(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009146 if (unlikely(ret))
9147 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009148 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009149 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009150 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009151
9152 if (submitted != to_submit)
9153 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009154 }
9155 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009156 const sigset_t __user *sig;
9157 struct __kernel_timespec __user *ts;
9158
9159 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9160 if (unlikely(ret))
9161 goto out;
9162
Jens Axboe2b188cc2019-01-07 10:46:33 -07009163 min_complete = min(min_complete, ctx->cq_entries);
9164
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009165 /*
9166 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9167	 * space applications don't need to poll for io completion events
9168	 * themselves; they can rely on io_sq_thread to do that polling
9169	 * work, which can reduce cpu usage and uring_lock contention.
9170 */
9171 if (ctx->flags & IORING_SETUP_IOPOLL &&
9172 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009173 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009174 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009175 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009176 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009177 }
9178
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009179out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009180 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009181out_fput:
9182 fdput(f);
9183 return submitted ? submitted : ret;
9184}
9185
Tobias Klauserbebdb652020-02-26 18:38:32 +01009186#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009187static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9188 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009189{
Jens Axboe87ce9552020-01-30 08:25:34 -07009190 struct user_namespace *uns = seq_user_ns(m);
9191 struct group_info *gi;
9192 kernel_cap_t cap;
9193 unsigned __capi;
9194 int g;
9195
9196 seq_printf(m, "%5d\n", id);
9197 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9198 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9199 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9200 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9201 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9202 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9203 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9204 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9205 seq_puts(m, "\n\tGroups:\t");
9206 gi = cred->group_info;
9207 for (g = 0; g < gi->ngroups; g++) {
9208 seq_put_decimal_ull(m, g ? " " : "",
9209 from_kgid_munged(uns, gi->gid[g]));
9210 }
9211 seq_puts(m, "\n\tCapEff:\t");
9212 cap = cred->cap_effective;
9213 CAP_FOR_EACH_U32(__capi)
9214 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9215 seq_putc(m, '\n');
9216 return 0;
9217}
9218
9219static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9220{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009221 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009222 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009223 int i;
9224
Jens Axboefad8e0d2020-09-28 08:57:48 -06009225 /*
9226 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9227	 * since the fdinfo case grabs it in the opposite direction of normal use
9228 * cases. If we fail to get the lock, we just don't iterate any
9229 * structures that could be going away outside the io_uring mutex.
9230 */
9231 has_lock = mutex_trylock(&ctx->uring_lock);
9232
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009233 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009234 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009235 if (!sq->thread)
9236 sq = NULL;
9237 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009238
9239 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9240 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009241 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009242 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Pavel Begunkovea64ec022021-02-04 13:52:07 +00009243 struct file *f = *io_fixed_file_slot(ctx->file_data, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009244
Jens Axboe87ce9552020-01-30 08:25:34 -07009245 if (f)
9246 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9247 else
9248 seq_printf(m, "%5u: <none>\n", i);
9249 }
9250 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009251 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009252 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9253
9254 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9255 (unsigned int) buf->len);
9256 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009257 if (has_lock && !xa_empty(&ctx->personalities)) {
9258 unsigned long index;
9259 const struct cred *cred;
9260
Jens Axboe87ce9552020-01-30 08:25:34 -07009261 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009262 xa_for_each(&ctx->personalities, index, cred)
9263 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009264 }
Jens Axboed7718a92020-02-14 22:23:12 -07009265 seq_printf(m, "PollList:\n");
9266 spin_lock_irq(&ctx->completion_lock);
9267 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9268 struct hlist_head *list = &ctx->cancel_hash[i];
9269 struct io_kiocb *req;
9270
9271 hlist_for_each_entry(req, list, hash_node)
9272 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9273 req->task->task_works != NULL);
9274 }
9275 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009276 if (has_lock)
9277 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009278}
9279
9280static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9281{
9282 struct io_ring_ctx *ctx = f->private_data;
9283
9284 if (percpu_ref_tryget(&ctx->refs)) {
9285 __io_uring_show_fdinfo(ctx, m);
9286 percpu_ref_put(&ctx->refs);
9287 }
9288}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009289#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009290
Jens Axboe2b188cc2019-01-07 10:46:33 -07009291static const struct file_operations io_uring_fops = {
9292 .release = io_uring_release,
9293 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009294#ifndef CONFIG_MMU
9295 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9296 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9297#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009298 .poll = io_uring_poll,
9299 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009300#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009301 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009302#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009303};
9304
9305static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9306 struct io_uring_params *p)
9307{
Hristo Venev75b28af2019-08-26 17:23:46 +00009308 struct io_rings *rings;
9309 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009310
Jens Axboebd740482020-08-05 12:58:23 -06009311 /* make sure these are sane, as we already accounted them */
9312 ctx->sq_entries = p->sq_entries;
9313 ctx->cq_entries = p->cq_entries;
9314
Hristo Venev75b28af2019-08-26 17:23:46 +00009315 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9316 if (size == SIZE_MAX)
9317 return -EOVERFLOW;
9318
9319 rings = io_mem_alloc(size);
9320 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009321 return -ENOMEM;
9322
Hristo Venev75b28af2019-08-26 17:23:46 +00009323 ctx->rings = rings;
9324 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9325 rings->sq_ring_mask = p->sq_entries - 1;
9326 rings->cq_ring_mask = p->cq_entries - 1;
9327 rings->sq_ring_entries = p->sq_entries;
9328 rings->cq_ring_entries = p->cq_entries;
9329 ctx->sq_mask = rings->sq_ring_mask;
9330 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009331
9332 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009333 if (size == SIZE_MAX) {
9334 io_mem_free(ctx->rings);
9335 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009336 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009337 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009338
9339 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009340 if (!ctx->sq_sqes) {
9341 io_mem_free(ctx->rings);
9342 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009343 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009344 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009345
Jens Axboe2b188cc2019-01-07 10:46:33 -07009346 return 0;
9347}
9348
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009349static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9350{
9351 int ret, fd;
9352
9353 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9354 if (fd < 0)
9355 return fd;
9356
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009357 ret = io_uring_add_task_file(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009358 if (ret) {
9359 put_unused_fd(fd);
9360 return ret;
9361 }
9362 fd_install(fd, file);
9363 return fd;
9364}
9365
Jens Axboe2b188cc2019-01-07 10:46:33 -07009366/*
9367 * Allocate an anonymous fd, this is what constitutes the application
9368 * visible backing of an io_uring instance. The application mmaps this
9369 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9370 * we have to tie this fd to a socket for file garbage collection purposes.
9371 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009372static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009373{
9374 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009375#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009376 int ret;
9377
Jens Axboe2b188cc2019-01-07 10:46:33 -07009378 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9379 &ctx->ring_sock);
9380 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009381 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009382#endif
9383
Jens Axboe2b188cc2019-01-07 10:46:33 -07009384 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9385 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009386#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009387 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009388 sock_release(ctx->ring_sock);
9389 ctx->ring_sock = NULL;
9390 } else {
9391 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009392 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009393#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009394 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009395}
9396
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009397static int io_uring_create(unsigned entries, struct io_uring_params *p,
9398 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009399{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009400 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009401 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009402 int ret;
9403
Jens Axboe8110c1a2019-12-28 15:39:54 -07009404 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009405 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009406 if (entries > IORING_MAX_ENTRIES) {
9407 if (!(p->flags & IORING_SETUP_CLAMP))
9408 return -EINVAL;
9409 entries = IORING_MAX_ENTRIES;
9410 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009411
9412 /*
9413 * Use twice as many entries for the CQ ring. It's possible for the
9414 * application to drive a higher depth than the size of the SQ ring,
9415 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009416 * some flexibility in overcommitting a bit. If the application has
9417 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9418 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009419 */
9420 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009421 if (p->flags & IORING_SETUP_CQSIZE) {
9422 /*
9423 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9424 * to a power-of-two, if it isn't already. We do NOT impose
9425 * any cq vs sq ring sizing.
9426 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009427 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009428 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009429 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9430 if (!(p->flags & IORING_SETUP_CLAMP))
9431 return -EINVAL;
9432 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9433 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009434 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9435 if (p->cq_entries < p->sq_entries)
9436 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009437 } else {
9438 p->cq_entries = 2 * p->sq_entries;
9439 }
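	/*
	 * Worked example of the sizing rules above (arithmetic only, not
	 * from the source): entries == 100 rounds the SQ ring up to 128
	 * and, without IORING_SETUP_CQSIZE, yields a 256-entry CQ ring.
	 * With CQSIZE and cq_entries == 1000 the CQ is rounded up to 1024,
	 * while asking for a 64-entry CQ against the 128-entry SQ fails
	 * with -EINVAL.
	 */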
Jens Axboe2b188cc2019-01-07 10:46:33 -07009440
Jens Axboe2b188cc2019-01-07 10:46:33 -07009441 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009442 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009443 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009444 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009445 if (!capable(CAP_IPC_LOCK))
9446 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009447
9448 /*
9449 * This is just grabbed for accounting purposes. When a process exits,
9450 * the mm is exited and dropped before the files, hence we need to hang
9451 * on to this mm purely for the purposes of being able to unaccount
9452 * memory (locked/pinned vm). It's not used for anything else.
9453 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009454 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009455 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009456
Jens Axboe2b188cc2019-01-07 10:46:33 -07009457 ret = io_allocate_scq_urings(ctx, p);
9458 if (ret)
9459 goto err;
9460
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009461 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009462 if (ret)
9463 goto err;
9464
Jens Axboe2b188cc2019-01-07 10:46:33 -07009465 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009466 p->sq_off.head = offsetof(struct io_rings, sq.head);
9467 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9468 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9469 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9470 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9471 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9472 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009473
9474 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009475 p->cq_off.head = offsetof(struct io_rings, cq.head);
9476 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9477 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9478 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9479 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9480 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009481 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009482
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009483 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9484 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009485 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009486 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Jens Axboe1c0aa1f2021-02-20 11:55:28 -07009487 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009488
9489 if (copy_to_user(params, p, sizeof(*p))) {
9490 ret = -EFAULT;
9491 goto err;
9492 }
Jens Axboed1719f72020-07-30 13:43:53 -06009493
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009494 file = io_uring_get_file(ctx);
9495 if (IS_ERR(file)) {
9496 ret = PTR_ERR(file);
9497 goto err;
9498 }
9499
Jens Axboed1719f72020-07-30 13:43:53 -06009500 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009501 * Install ring fd as the very last thing, so we don't risk someone
9502 * having closed it before we finish setup
9503 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009504 ret = io_uring_install_fd(ctx, file);
9505 if (ret < 0) {
9506 /* fput will clean it up */
9507 fput(file);
9508 return ret;
9509 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009510
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009511 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009512 return ret;
9513err:
9514 io_ring_ctx_wait_and_kill(ctx);
9515 return ret;
9516}
9517
9518/*
9519 * Sets up an io_uring context, and returns the fd. Applications ask for a
9520 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9521 * params structure passed in.
9522 */
9523static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9524{
9525 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009526 int i;
9527
9528 if (copy_from_user(&p, params, sizeof(p)))
9529 return -EFAULT;
9530 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9531 if (p.resv[i])
9532 return -EINVAL;
9533 }
9534
Jens Axboe6c271ce2019-01-10 11:22:30 -07009535 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009536 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009537 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9538 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009539 return -EINVAL;
9540
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009541 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009542}
9543
9544SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9545 struct io_uring_params __user *, params)
9546{
9547 return io_uring_setup(entries, params);
9548}
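/*
 * Illustrative only, not part of this file: the features mask filled in
 * by io_uring_create() is how applications discover optional behaviour
 * at runtime. p is assumed to be the io_uring_params passed to the setup
 * call, and the two helpers are hypothetical application code.
 *
 *	bool have_ext_arg = p.features & IORING_FEAT_EXT_ARG;
 *	bool single_mmap  = p.features & IORING_FEAT_SINGLE_MMAP;
 *
 *	if (!single_mmap)
 *		map_cq_ring_separately();
 *	if (have_ext_arg)
 *		wait_with_timeout_via_ext_arg();
 */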
9549
Jens Axboe66f4af92020-01-16 15:36:52 -07009550static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9551{
9552 struct io_uring_probe *p;
9553 size_t size;
9554 int i, ret;
9555
9556 size = struct_size(p, ops, nr_args);
9557 if (size == SIZE_MAX)
9558 return -EOVERFLOW;
9559 p = kzalloc(size, GFP_KERNEL);
9560 if (!p)
9561 return -ENOMEM;
9562
9563 ret = -EFAULT;
9564 if (copy_from_user(p, arg, size))
9565 goto out;
9566 ret = -EINVAL;
9567 if (memchr_inv(p, 0, size))
9568 goto out;
9569
9570 p->last_op = IORING_OP_LAST - 1;
9571 if (nr_args > IORING_OP_LAST)
9572 nr_args = IORING_OP_LAST;
9573
9574 for (i = 0; i < nr_args; i++) {
9575 p->ops[i].op = i;
9576 if (!io_op_defs[i].not_supported)
9577 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9578 }
9579 p->ops_len = i;
9580
9581 ret = 0;
9582 if (copy_to_user(arg, p, size))
9583 ret = -EFAULT;
9584out:
9585 kfree(p);
9586 return ret;
9587}
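/*
 * Illustrative only, not part of this file: a rough userspace sketch of
 * consuming the probe result built above. The caller passes a zeroed
 * buffer sized for IORING_OP_LAST ops; supported opcodes come back with
 * IO_URING_OP_SUPPORTED set in their flags.
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *
 *	if (!syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		     probe, IORING_OP_LAST))
 *		for (int i = 0; i < probe->ops_len; i++)
 *			if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
 *				printf("op %u supported\n", probe->ops[i].op);
 *	free(probe);
 */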
9588
Jens Axboe071698e2020-01-28 10:04:42 -07009589static int io_register_personality(struct io_ring_ctx *ctx)
9590{
Jens Axboe4379bf82021-02-15 13:40:22 -07009591 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009592 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009593 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009594
Jens Axboe4379bf82021-02-15 13:40:22 -07009595 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009596
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009597 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9598 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9599 if (!ret)
9600 return id;
9601 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009602 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009603}
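/*
 * Illustrative only, not part of this file: how the personality id
 * allocated above is used. A task registers its current credentials once
 * and then tags individual SQEs with the returned id so those requests
 * are issued with these creds; sqe is assumed to point into the mmap'ed
 * SQE array.
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *
 *	sqe->personality = id;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_UNREGISTER_PERSONALITY, NULL, id);
 */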
9604
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009605static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9606 unsigned int nr_args)
9607{
9608 struct io_uring_restriction *res;
9609 size_t size;
9610 int i, ret;
9611
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009612	/* Restrictions are allowed only if the ring starts disabled */
9613 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9614 return -EBADFD;
9615
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009616 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009617 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009618 return -EBUSY;
9619
9620 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9621 return -EINVAL;
9622
9623 size = array_size(nr_args, sizeof(*res));
9624 if (size == SIZE_MAX)
9625 return -EOVERFLOW;
9626
9627 res = memdup_user(arg, size);
9628 if (IS_ERR(res))
9629 return PTR_ERR(res);
9630
9631 ret = 0;
9632
9633 for (i = 0; i < nr_args; i++) {
9634 switch (res[i].opcode) {
9635 case IORING_RESTRICTION_REGISTER_OP:
9636 if (res[i].register_op >= IORING_REGISTER_LAST) {
9637 ret = -EINVAL;
9638 goto out;
9639 }
9640
9641 __set_bit(res[i].register_op,
9642 ctx->restrictions.register_op);
9643 break;
9644 case IORING_RESTRICTION_SQE_OP:
9645 if (res[i].sqe_op >= IORING_OP_LAST) {
9646 ret = -EINVAL;
9647 goto out;
9648 }
9649
9650 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9651 break;
9652 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9653 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9654 break;
9655 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9656 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9657 break;
9658 default:
9659 ret = -EINVAL;
9660 goto out;
9661 }
9662 }
9663
9664out:
9665 /* Reset all restrictions if an error happened */
9666 if (ret != 0)
9667 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9668 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009669 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009670
9671 kfree(res);
9672 return ret;
9673}
9674
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009675static int io_register_enable_rings(struct io_ring_ctx *ctx)
9676{
9677 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9678 return -EBADFD;
9679
9680 if (ctx->restrictions.registered)
9681 ctx->restricted = 1;
9682
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009683 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9684 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9685 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009686 return 0;
9687}
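/*
 * Illustrative only, not part of this file: the intended sequence for the
 * two helpers above. A privileged parent creates the ring disabled, pins
 * down what it may do, then enables it before handing the fd to a less
 * trusted consumer.
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_R_DISABLED };
 *	int fd = syscall(__NR_io_uring_setup, 64, &p);
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
 *		  .register_op = IORING_REGISTER_BUFFERS },
 *	};
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_RESTRICTIONS,
 *		res, 2);
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_ENABLE_RINGS,
 *		NULL, 0);
 */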
9688
Jens Axboe071698e2020-01-28 10:04:42 -07009689static bool io_register_op_must_quiesce(int op)
9690{
9691 switch (op) {
9692 case IORING_UNREGISTER_FILES:
9693 case IORING_REGISTER_FILES_UPDATE:
9694 case IORING_REGISTER_PROBE:
9695 case IORING_REGISTER_PERSONALITY:
9696 case IORING_UNREGISTER_PERSONALITY:
9697 return false;
9698 default:
9699 return true;
9700 }
9701}
9702
Jens Axboeedafcce2019-01-09 09:16:05 -07009703static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9704 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06009705 __releases(ctx->uring_lock)
9706 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07009707{
9708 int ret;
9709
Jens Axboe35fa71a2019-04-22 10:23:23 -06009710 /*
9711	 * We're inside the ring mutex; if the ref is already dying, then
9712 * someone else killed the ctx or is already going through
9713 * io_uring_register().
9714 */
9715 if (percpu_ref_is_dying(&ctx->refs))
9716 return -ENXIO;
9717
Jens Axboe071698e2020-01-28 10:04:42 -07009718 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009719 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06009720
Jens Axboe05f3fb32019-12-09 11:22:50 -07009721 /*
9722 * Drop uring mutex before waiting for references to exit. If
9723 * another thread is currently inside io_uring_enter() it might
9724 * need to grab the uring_lock to make progress. If we hold it
9725 * here across the drain wait, then we can deadlock. It's safe
9726 * to drop the mutex here, since no new references will come in
9727 * after we've killed the percpu ref.
9728 */
9729 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009730 do {
9731 ret = wait_for_completion_interruptible(&ctx->ref_comp);
9732 if (!ret)
9733 break;
Jens Axboeed6930c2020-10-08 19:09:46 -06009734 ret = io_run_task_work_sig();
9735 if (ret < 0)
9736 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009737 } while (1);
9738
Jens Axboe05f3fb32019-12-09 11:22:50 -07009739 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009740
Jens Axboec1503682020-01-08 08:26:07 -07009741 if (ret) {
9742 percpu_ref_resurrect(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009743 goto out_quiesce;
9744 }
9745 }
9746
9747 if (ctx->restricted) {
9748 if (opcode >= IORING_REGISTER_LAST) {
9749 ret = -EINVAL;
9750 goto out;
9751 }
9752
9753 if (!test_bit(opcode, ctx->restrictions.register_op)) {
9754 ret = -EACCES;
Jens Axboec1503682020-01-08 08:26:07 -07009755 goto out;
9756 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07009757 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009758
9759 switch (opcode) {
9760 case IORING_REGISTER_BUFFERS:
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009761 ret = io_sqe_buffers_register(ctx, arg, nr_args);
Jens Axboeedafcce2019-01-09 09:16:05 -07009762 break;
9763 case IORING_UNREGISTER_BUFFERS:
9764 ret = -EINVAL;
9765 if (arg || nr_args)
9766 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009767 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07009768 break;
Jens Axboe6b063142019-01-10 22:13:58 -07009769 case IORING_REGISTER_FILES:
9770 ret = io_sqe_files_register(ctx, arg, nr_args);
9771 break;
9772 case IORING_UNREGISTER_FILES:
9773 ret = -EINVAL;
9774 if (arg || nr_args)
9775 break;
9776 ret = io_sqe_files_unregister(ctx);
9777 break;
Jens Axboec3a31e62019-10-03 13:59:56 -06009778 case IORING_REGISTER_FILES_UPDATE:
9779 ret = io_sqe_files_update(ctx, arg, nr_args);
9780 break;
Jens Axboe9b402842019-04-11 11:45:41 -06009781 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -07009782 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -06009783 ret = -EINVAL;
9784 if (nr_args != 1)
9785 break;
9786 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -07009787 if (ret)
9788 break;
9789 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9790 ctx->eventfd_async = 1;
9791 else
9792 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -06009793 break;
9794 case IORING_UNREGISTER_EVENTFD:
9795 ret = -EINVAL;
9796 if (arg || nr_args)
9797 break;
9798 ret = io_eventfd_unregister(ctx);
9799 break;
Jens Axboe66f4af92020-01-16 15:36:52 -07009800 case IORING_REGISTER_PROBE:
9801 ret = -EINVAL;
9802 if (!arg || nr_args > 256)
9803 break;
9804 ret = io_probe(ctx, arg, nr_args);
9805 break;
Jens Axboe071698e2020-01-28 10:04:42 -07009806 case IORING_REGISTER_PERSONALITY:
9807 ret = -EINVAL;
9808 if (arg || nr_args)
9809 break;
9810 ret = io_register_personality(ctx);
9811 break;
9812 case IORING_UNREGISTER_PERSONALITY:
9813 ret = -EINVAL;
9814 if (arg)
9815 break;
9816 ret = io_unregister_personality(ctx, nr_args);
9817 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009818 case IORING_REGISTER_ENABLE_RINGS:
9819 ret = -EINVAL;
9820 if (arg || nr_args)
9821 break;
9822 ret = io_register_enable_rings(ctx);
9823 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009824 case IORING_REGISTER_RESTRICTIONS:
9825 ret = io_register_restrictions(ctx, arg, nr_args);
9826 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009827 default:
9828 ret = -EINVAL;
9829 break;
9830 }
9831
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009832out:
Jens Axboe071698e2020-01-28 10:04:42 -07009833 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009834 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -07009835 percpu_ref_reinit(&ctx->refs);
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009836out_quiesce:
Jens Axboe0f158b42020-05-14 17:18:39 -06009837 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -07009838 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009839 return ret;
9840}
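/*
 * A minimal userspace sketch of driving this path, assuming a buffer `buf`
 * of `buf_len` bytes and an io_uring fd `ring_fd` (illustrative only, error
 * handling omitted; liburing offers io_uring_register_buffers() for the
 * same operation):
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len  = buf_len,
 *	};
 *
 *	if (syscall(__NR_io_uring_register, ring_fd,
 *		    IORING_REGISTER_BUFFERS, &iov, 1) < 0)
 *		perror("io_uring_register");
 *
 * Once registered, the buffer can be used with IORING_OP_READ_FIXED /
 * IORING_OP_WRITE_FIXED by setting buf_index = 0 in the SQE.
 */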
9841
9842SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9843 void __user *, arg, unsigned int, nr_args)
9844{
9845 struct io_ring_ctx *ctx;
9846 long ret = -EBADF;
9847 struct fd f;
9848
9849 f = fdget(fd);
9850 if (!f.file)
9851 return -EBADF;
9852
9853 ret = -EOPNOTSUPP;
9854 if (f.file->f_op != &io_uring_fops)
9855 goto out_fput;
9856
9857 ctx = f.file->private_data;
9858
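	/*
	 * Flush any task_work already queued for this task before taking
	 * uring_lock, so the registration path below operates on up-to-date
	 * ctx state.
	 */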
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +00009859 io_run_task_work();
9860
Jens Axboeedafcce2019-01-09 09:16:05 -07009861 mutex_lock(&ctx->uring_lock);
9862 ret = __io_uring_register(ctx, opcode, arg, nr_args);
9863 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009864 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9865 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07009866out_fput:
9867 fdput(f);
9868 return ret;
9869}
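/*
 * Feature discovery from userspace goes through the same syscall; a sketch
 * using IORING_REGISTER_PROBE (assumes ring_fd, headers and error handling
 * omitted; liburing wraps this as io_uring_get_probe() and
 * io_uring_opcode_supported()):
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     256 * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *p = calloc(1, len);
 *	bool readv_ok = false;
 *
 *	if (!syscall(__NR_io_uring_register, ring_fd,
 *		     IORING_REGISTER_PROBE, p, 256))
 *		readv_ok = p->ops[IORING_OP_READV].flags &
 *			   IO_URING_OP_SUPPORTED;
 *	free(p);
 */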
9870
Jens Axboe2b188cc2019-01-07 10:46:33 -07009871static int __init io_uring_init(void)
9872{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009873#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
9874 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
9875 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
9876} while (0)
9877
9878#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
9879 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
9880 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
9881 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
9882 BUILD_BUG_SQE_ELEM(1, __u8, flags);
9883 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
9884 BUILD_BUG_SQE_ELEM(4, __s32, fd);
9885 BUILD_BUG_SQE_ELEM(8, __u64, off);
9886 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
9887 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009888 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009889 BUILD_BUG_SQE_ELEM(24, __u32, len);
9890 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
9891 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
9892 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
9893 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +08009894 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
9895 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009896 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
9897 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
9898 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
9899 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
9900 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
9901 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
9902 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
9903 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009904 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009905 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
9906 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
9907 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009908 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009909
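	/*
	 * The checks above pin the UAPI layout of struct io_uring_sqe at
	 * compile time. A userspace consumer can mirror them, e.g.
	 * (hypothetical application-side assertions, not part of this file):
	 *
	 *	_Static_assert(sizeof(struct io_uring_sqe) == 64,
	 *		       "SQE ABI size changed");
	 *	_Static_assert(offsetof(struct io_uring_sqe, user_data) == 32,
	 *		       "user_data moved");
	 */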
Jens Axboed3656342019-12-18 09:50:26 -07009910 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -07009911 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Jens Axboe91f245d2021-02-09 13:48:50 -07009912 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
9913 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009914 return 0;
9915};
9916__initcall(io_uring_init);