// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
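/*
 * Illustrative userspace-side sketch of the CQ protocol described above.
 * This is not part of the kernel implementation; it assumes liburing-style
 * ring mappings where cq_head, cq_tail, cq_mask and cqes point into the
 * shared IORING_OFF_CQ_RING area, and a hypothetical consume_cqe() handler:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail);	// pairs with kernel tail store
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		consume_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);		// orders entry loads before head store
 */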
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

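/*
 * Illustrative userspace mapping sketch (not kernel code): assuming the ring
 * fd and the struct io_uring_params 'p' filled in by io_uring_setup(), an
 * application typically maps the two rings the way liburing does:
 *
 *	sq = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, IORING_OFF_SQ_RING);
 *	cq = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		  ring_fd, IORING_OFF_CQ_RING);
 *
 * The struct io_rings fields above then live at the published
 * sq_off/cq_off byte offsets within those mappings.
 */
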
enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	/* two level table */
	struct io_fixed_file **files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
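		/*
		 * Illustrative lookup enabled by this indirection (a sketch,
		 * not the exact helper used later in this file):
		 *
		 *	idx = READ_ONCE(sq_array[cached_sq_head & (sq_entries - 1)]);
		 *	sqe = &sq_sqes[idx];
		 */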
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned long		sq_check_overflow;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned		cq_extra;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket		*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash		*hash_map;

		/* Only used for accounting purposes */
		struct user_struct		*user;
		struct mm_struct		*mm_account;

		/* ctx exit and cancelation */
		struct callback_head		*exit_task_work;
		struct work_struct		exit_work;
		struct list_head		tctx_list;
		struct completion		ref_comp;
	};
};

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_ASYNC_READ_BIT,
	REQ_F_ASYNC_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_ASYNC_READ	= BIT(REQ_F_ASYNC_READ_BIT),
	/* supports async writes */
	REQ_F_ASYNC_WRITE	= BIT(REQ_F_ASYNC_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

struct io_task_work {
	struct io_wq_work_node	node;
	task_work_func_t	func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	union {
		struct io_task_work	io_task_work;
		struct callback_head	task_work;
	};
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx);
static bool io_poll_remove_waitqs(struct io_kiocb *req);
static int io_req_prep_async(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	/* already at zero, wait for ->release() */
	if (!got)
		wait_for_completion(compl);
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
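	/*
	 * Worked example (hypothetical numbers, not a fixed configuration):
	 * with p->cq_entries == 4096, ilog2() gives 12, so hash_bits becomes
	 * 7, i.e. 128 hash lists and roughly 4096 / 128 == 32 entries per
	 * list when the CQ is completely full.
	 */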
1156 hash_bits = ilog2(p->cq_entries);
1157 hash_bits -= 5;
1158 if (hash_bits <= 0)
1159 hash_bits = 1;
1160 ctx->cancel_hash_bits = hash_bits;
1161 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1162 GFP_KERNEL);
1163 if (!ctx->cancel_hash)
1164 goto err;
1165 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1166
Pavel Begunkov62248432021-04-28 13:11:29 +01001167 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1168 if (!ctx->dummy_ubuf)
1169 goto err;
1170 /* set invalid range, so io_import_fixed() fails meeting it */
1171 ctx->dummy_ubuf->ubuf = -1UL;
1172
Roman Gushchin21482892019-05-07 10:01:48 -07001173 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001174 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1175 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001176
1177 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001178 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001179 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001180 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001181 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001182 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001183 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001184 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001185 mutex_init(&ctx->uring_lock);
1186 init_waitqueue_head(&ctx->wait);
1187 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001188 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001189 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001190 INIT_LIST_HEAD(&ctx->timeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001191 spin_lock_init(&ctx->rsrc_ref_lock);
1192 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001193 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1194 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001195 INIT_LIST_HEAD(&ctx->tctx_list);
Jens Axboe1b4c3512021-02-10 00:03:19 +00001196 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001197 INIT_LIST_HEAD(&ctx->locked_free_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001198 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001199err:
Pavel Begunkov62248432021-04-28 13:11:29 +01001200 kfree(ctx->dummy_ubuf);
Jens Axboe78076bb2019-12-04 19:56:40 -07001201 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001202 kfree(ctx);
1203 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001204}
1205
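/*
 * A CQE had to be dropped: report it through the ring's user-visible
 * cq_overflow counter and decrement cq_extra, so drain sequencing (see
 * req_need_defer() below) doesn't wait for a completion that will never
 * advance the CQ tail.
 */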
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001206static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1207{
1208 struct io_rings *r = ctx->rings;
1209
1210 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1211 ctx->cq_extra--;
1212}
1213
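/*
 * IOSQE_IO_DRAIN sequencing: keep the request deferred until the CQ tail
 * has caught up with the submission sequence recorded for it. cq_extra
 * adjusts the comparison, e.g. io_account_cq_overflow() above decrements
 * it for a CQE that was dropped and so will never advance the tail.
 */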
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001214static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001215{
Jens Axboe2bc99302020-07-09 09:43:27 -06001216 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1217 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001218
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001219 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001220 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001221
Bob Liu9d858b22019-11-13 18:06:25 +08001222 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001223}
1224
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001225static void io_req_track_inflight(struct io_kiocb *req)
1226{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001227 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001228 req->flags |= REQ_F_INFLIGHT;
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001229 atomic_inc(&current->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001230 }
1231}
1232
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001233static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001234{
Jens Axboed3656342019-12-18 09:50:26 -07001235 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001236 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001237
Jens Axboe003e8dc2021-03-06 09:22:27 -07001238 if (!req->work.creds)
1239 req->work.creds = get_current_cred();
1240
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001241 req->work.list.next = NULL;
1242 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001243 if (req->flags & REQ_F_FORCE_ASYNC)
1244 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1245
Jens Axboed3656342019-12-18 09:50:26 -07001246 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001247 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001248 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001249 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001250 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001251 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001252 }
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001253
1254 switch (req->opcode) {
1255 case IORING_OP_SPLICE:
1256 case IORING_OP_TEE:
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001257 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1258 req->work.flags |= IO_WQ_WORK_UNBOUND;
1259 break;
1260 }
Jens Axboe561fb042019-10-24 07:25:42 -06001261}
1262
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001263static void io_prep_async_link(struct io_kiocb *req)
1264{
1265 struct io_kiocb *cur;
1266
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001267 io_for_each_link(cur, req)
1268 io_prep_async_work(cur);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001269}
1270
Pavel Begunkovebf93662021-03-01 18:20:47 +00001271static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001272{
Jackie Liua197f662019-11-08 08:09:12 -07001273 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001274 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001275 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001276
Jens Axboe3bfe6102021-02-16 14:15:30 -07001277 BUG_ON(!tctx);
1278 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001279
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001280 /* init ->work of the whole link before punting */
1281 io_prep_async_link(req);
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001282 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1283 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001284 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001285 if (link)
1286 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001287}
1288
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001289static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001290 __must_hold(&req->ctx->completion_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001291{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001292 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001293
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001294 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001295 atomic_set(&req->ctx->cq_timeouts,
1296 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001297 list_del_init(&req->timeout.list);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001298 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001299 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001300 }
1301}
1302
Pavel Begunkov04518942020-05-26 20:34:05 +03001303static void __io_queue_deferred(struct io_ring_ctx *ctx)
1304{
1305 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001306 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1307 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001308
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001309 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001310 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001311 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001312 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001313 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001314 } while (!list_empty(&ctx->defer_list));
1315}
1316
Pavel Begunkov360428f2020-05-30 14:54:17 +03001317static void io_flush_timeouts(struct io_ring_ctx *ctx)
1318{
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001319 u32 seq;
1320
1321 if (list_empty(&ctx->timeout_list))
1322 return;
1323
1324 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1325
1326 do {
1327 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001328 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001329 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001330
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001331 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001332 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001333
1334 /*
1335 * Since seq can easily wrap around over time, subtract
1336 * the last seq at which timeouts were flushed before comparing.
1337 * Assuming not more than 2^31-1 events have happened since,
1338 * these subtractions won't have wrapped, so we can check if
1339 * target is in [last_seq, current_seq] by comparing the two.
1340 */
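		/*
		 * Hypothetical example: with cq_last_tm_flush == 0xfffffff0
		 * and a timeout armed at target_seq == 0x10, events_needed
		 * is 0x20; if seq == 0x5 then events_got is 0x15 < 0x20, so
		 * the timeout isn't due yet and we stop flushing.
		 */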
1341 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1342 events_got = seq - ctx->cq_last_tm_flush;
1343 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001344 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001345
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001346 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001347 io_kill_timeout(req, 0);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001348 } while (!list_empty(&ctx->timeout_list));
1349
1350 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001351}
1352
Jens Axboede0617e2019-04-06 21:51:27 -06001353static void io_commit_cqring(struct io_ring_ctx *ctx)
1354{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001355 io_flush_timeouts(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001356
1357 /* order cqe stores with ring update */
1358 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001359
Pavel Begunkov04518942020-05-26 20:34:05 +03001360 if (unlikely(!list_empty(&ctx->defer_list)))
1361 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001362}
1363
Jens Axboe90554202020-09-03 12:12:41 -06001364static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1365{
1366 struct io_rings *r = ctx->rings;
1367
Pavel Begunkova566c552021-05-16 22:58:08 +01001368 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001369}
1370
Pavel Begunkov888aae22021-01-19 13:32:39 +00001371static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1372{
1373 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1374}
1375
Pavel Begunkovd068b502021-05-16 22:58:11 +01001376static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001377{
Hristo Venev75b28af2019-08-26 17:23:46 +00001378 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001379 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001380
Stefan Bühler115e12e2019-04-24 23:54:18 +02001381 /*
1382 * writes to the cq entry need to come after reading head; the
1383 * control dependency is enough as we're using WRITE_ONCE to
1384 * fill the cq entry
1385 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001386 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001387 return NULL;
1388
Pavel Begunkov888aae22021-01-19 13:32:39 +00001389 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001390 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001391}
1392
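/*
 * Decide whether posting a CQE should also signal the registered eventfd:
 * only if one is registered, userspace hasn't flagged it disabled via
 * IORING_CQ_EVENTFD_DISABLED, and either it wasn't registered as
 * async-only or we're completing from an io-wq worker.
 */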
Jens Axboef2842ab2020-01-08 11:04:00 -07001393static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1394{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001395 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001396 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001397 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1398 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001399 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001400}
1401
Jens Axboeb41e9852020-02-17 09:52:41 -07001402static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001403{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001404 /* see waitqueue_active() comment */
1405 smp_mb();
1406
Jens Axboe8c838782019-03-12 15:48:16 -06001407 if (waitqueue_active(&ctx->wait))
1408 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001409 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1410 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001411 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001412 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001413 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001414 wake_up_interruptible(&ctx->cq_wait);
1415 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1416 }
Jens Axboe8c838782019-03-12 15:48:16 -06001417}
1418
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001419static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1420{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001421 /* see waitqueue_active() comment */
1422 smp_mb();
1423
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001424 if (ctx->flags & IORING_SETUP_SQPOLL) {
1425 if (waitqueue_active(&ctx->wait))
1426 wake_up(&ctx->wait);
1427 }
1428 if (io_should_trigger_evfd(ctx))
1429 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001430 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001431 wake_up_interruptible(&ctx->cq_wait);
1432 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1433 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001434}
1435
Jens Axboec4a2ed72019-11-21 21:01:26 -07001436/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001437static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001438{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001439 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001440 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001441
Pavel Begunkova566c552021-05-16 22:58:08 +01001442 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001443 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001444
Jens Axboeb18032b2021-01-24 16:58:56 -07001445 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001446 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001447 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001448 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001449 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001450
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001451 if (!cqe && !force)
1452 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001453 ocqe = list_first_entry(&ctx->cq_overflow_list,
1454 struct io_overflow_cqe, list);
1455 if (cqe)
1456 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1457 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001458 io_account_cq_overflow(ctx);
1459
Jens Axboeb18032b2021-01-24 16:58:56 -07001460 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001461 list_del(&ocqe->list);
1462 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001463 }
1464
Pavel Begunkov09e88402020-12-17 00:24:38 +00001465 all_flushed = list_empty(&ctx->cq_overflow_list);
1466 if (all_flushed) {
1467 clear_bit(0, &ctx->sq_check_overflow);
1468 clear_bit(0, &ctx->cq_check_overflow);
1469 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1470 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001471
Jens Axboeb18032b2021-01-24 16:58:56 -07001472 if (posted)
1473 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001474 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001475 if (posted)
1476 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001477 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001478}
1479
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001480static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001481{
Jens Axboeca0a2652021-03-04 17:15:48 -07001482 bool ret = true;
1483
Pavel Begunkov6c503152021-01-04 20:36:36 +00001484 if (test_bit(0, &ctx->cq_check_overflow)) {
1485 /* iopoll syncs against uring_lock, not completion_lock */
1486 if (ctx->flags & IORING_SETUP_IOPOLL)
1487 mutex_lock(&ctx->uring_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001488 ret = __io_cqring_overflow_flush(ctx, force);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001489 if (ctx->flags & IORING_SETUP_IOPOLL)
1490 mutex_unlock(&ctx->uring_lock);
1491 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001492
1493 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001494}
1495
Jens Axboeabc54d62021-02-24 13:32:30 -07001496/*
1497 * Shamelessly stolen from the mm implementation of page reference checking,
1498 * see commit f958d7b528b1 for details.
1499 */
1500#define req_ref_zero_or_close_to_overflow(req) \
1501 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
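/*
 * With unsigned 32-bit wrap-around, "refs + 127u <= 127u" is true only when
 * refs is 0 or greater than UINT_MAX - 127, i.e. the count already hit zero
 * or is about to wrap -- both signs of a refcounting bug.
 */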
1502
Jens Axboede9b4cc2021-02-24 13:28:27 -07001503static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001504{
Jens Axboeabc54d62021-02-24 13:32:30 -07001505 return atomic_inc_not_zero(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001506}
1507
1508static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
1509{
Jens Axboeabc54d62021-02-24 13:32:30 -07001510 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1511 return atomic_sub_and_test(refs, &req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001512}
1513
1514static inline bool req_ref_put_and_test(struct io_kiocb *req)
1515{
Jens Axboeabc54d62021-02-24 13:32:30 -07001516 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1517 return atomic_dec_and_test(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001518}
1519
1520static inline void req_ref_put(struct io_kiocb *req)
1521{
Jens Axboeabc54d62021-02-24 13:32:30 -07001522 WARN_ON_ONCE(req_ref_put_and_test(req));
Jens Axboede9b4cc2021-02-24 13:28:27 -07001523}
1524
1525static inline void req_ref_get(struct io_kiocb *req)
1526{
Jens Axboeabc54d62021-02-24 13:32:30 -07001527 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1528 atomic_inc(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001529}
1530
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001531static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1532 long res, unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001533{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001534 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001535
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001536 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1537 if (!ocqe) {
1538 /*
1539 * If we're in ring overflow flush mode, or in task cancel mode,
1540 * or cannot allocate an overflow entry, then we need to drop it
1541 * on the floor.
1542 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001543 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001544 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001545 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001546 if (list_empty(&ctx->cq_overflow_list)) {
1547 set_bit(0, &ctx->sq_check_overflow);
1548 set_bit(0, &ctx->cq_check_overflow);
1549 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
1550 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001551 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001552 ocqe->cqe.res = res;
1553 ocqe->cqe.flags = cflags;
1554 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1555 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001556}
1557
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001558static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1559 long res, unsigned int cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001560{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001561 struct io_uring_cqe *cqe;
1562
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001563 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001564
1565 /*
1566 * If we can't get a cq entry, userspace overflowed the
1567 * submission (by quite a lot). Increment the overflow count in
1568 * the ring.
1569 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001570 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001571 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001572 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001573 WRITE_ONCE(cqe->res, res);
1574 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001575 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001576 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001577 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001578}
1579
Pavel Begunkov8d133262021-04-11 01:46:33 +01001580/* not hot enough to be worth bloating the caller with inlining */
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001581static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1582 long res, unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001583{
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001584 return __io_cqring_fill_event(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001585}
1586
Pavel Begunkov7a612352021-03-09 00:37:59 +00001587static void io_req_complete_post(struct io_kiocb *req, long res,
1588 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001589{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001590 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001591 unsigned long flags;
1592
1593 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001594 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001595 /*
1596 * If we're the last reference to this request, add to our locked
1597 * free_list cache.
1598 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001599 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001600 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001601 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
Pavel Begunkov7a612352021-03-09 00:37:59 +00001602 io_disarm_next(req);
1603 if (req->link) {
1604 io_req_task_queue(req->link);
1605 req->link = NULL;
1606 }
1607 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001608 io_dismantle_req(req);
1609 io_put_task(req->task, 1);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001610 list_add(&req->compl.list, &ctx->locked_free_list);
1611 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001612 } else {
1613 if (!percpu_ref_tryget(&ctx->refs))
1614 req = NULL;
1615 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001616 io_commit_cqring(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001617 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001618
Pavel Begunkov180f8292021-03-14 20:57:09 +00001619 if (req) {
1620 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001621 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001622 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001623}
1624
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001625static inline bool io_req_needs_clean(struct io_kiocb *req)
1626{
Jens Axboe75652a302021-04-15 09:52:40 -06001627 return req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP |
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01001628 REQ_F_POLLED | REQ_F_INFLIGHT);
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001629}
1630
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001631static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001632 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001633{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001634 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001635 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001636 req->result = res;
1637 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001638 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001639}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001640
Pavel Begunkov889fca72021-02-10 00:03:09 +00001641static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1642 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001643{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001644 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1645 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001646 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001647 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001648}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001649
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001650static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001651{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001652 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001653}
1654
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001655static void io_req_complete_failed(struct io_kiocb *req, long res)
1656{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001657 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001658 io_put_req(req);
1659 io_req_complete_post(req, res, 0);
1660}
1661
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001662static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1663 struct io_comp_state *cs)
1664{
1665 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001666 list_splice_init(&ctx->locked_free_list, &cs->free_list);
1667 ctx->locked_free_nr = 0;
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001668 spin_unlock_irq(&ctx->completion_lock);
1669}
1670
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001671/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001672static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001673{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001674 struct io_submit_state *state = &ctx->submit_state;
1675 struct io_comp_state *cs = &state->comp;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001676 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001677
Jens Axboec7dae4b2021-02-09 19:53:37 -07001678 /*
1679 * If we have more than a batch's worth of requests in our IRQ side
1680 * locked cache, grab the lock and move them over to our submission
1681 * side cache.
1682 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001683 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001684 io_flush_cached_locked_reqs(ctx, cs);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001685
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001686 nr = state->free_reqs;
Jens Axboec7dae4b2021-02-09 19:53:37 -07001687 while (!list_empty(&cs->free_list)) {
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001688 struct io_kiocb *req = list_first_entry(&cs->free_list,
1689 struct io_kiocb, compl.list);
1690
Jens Axboe2b188cc2019-01-07 10:46:33 -07001691 list_del(&req->compl.list);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001692 state->reqs[nr++] = req;
1693 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001694 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001695 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001696
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001697 state->free_reqs = nr;
1698 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001699}
1700
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001701static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001702{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001703 struct io_submit_state *state = &ctx->submit_state;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001704
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001705 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
Jens Axboe2b188cc2019-01-07 10:46:33 -07001706
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001707 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001708 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001709 int ret;
1710
Jens Axboec7dae4b2021-02-09 19:53:37 -07001711 if (io_flush_cached_reqs(ctx))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001712 goto got_req;
1713
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001714 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1715 state->reqs);
Jens Axboefd6fab22019-03-14 16:30:06 -06001716
1717 /*
1718 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1719 * retry single alloc to be on the safe side.
1720 */
1721 if (unlikely(ret <= 0)) {
1722 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1723 if (!state->reqs[0])
Pavel Begunkov3893f392021-02-10 00:03:15 +00001724 return NULL;
Jens Axboefd6fab22019-03-14 16:30:06 -06001725 ret = 1;
1726 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001727 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001728 }
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001729got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001730 state->free_reqs--;
1731 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001732}
1733
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001734static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001735{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001736 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001737 fput(file);
1738}
1739
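/*
 * Release everything the request picked up at prep/issue time: selected
 * buffers / async data, the file reference, fixed rsrc refs and creds.
 * Freeing the io_kiocb itself is left to the caller.
 */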
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001740static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001741{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001742 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001743
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01001744 if (io_req_needs_clean(req))
1745 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001746 if (!(flags & REQ_F_FIXED_FILE))
1747 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001748 if (req->fixed_rsrc_refs)
1749 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov094bae42021-03-19 17:22:42 +00001750 if (req->async_data)
1751 kfree(req->async_data);
Jens Axboe003e8dc2021-03-06 09:22:27 -07001752 if (req->work.creds) {
1753 put_cred(req->work.creds);
1754 req->work.creds = NULL;
1755 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001756}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001757
Pavel Begunkovb23fcf42021-03-01 18:20:48 +00001758/* must be called somewhat shortly after putting a request */
Pavel Begunkov7c660732021-01-25 11:42:21 +00001759static inline void io_put_task(struct task_struct *task, int nr)
1760{
1761 struct io_uring_task *tctx = task->io_uring;
1762
1763 percpu_counter_sub(&tctx->inflight, nr);
1764 if (unlikely(atomic_read(&tctx->in_idle)))
1765 wake_up(&tctx->wait);
1766 put_task_struct_many(task, nr);
1767}
1768
Pavel Begunkov216578e2020-10-13 09:44:00 +01001769static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001770{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001771 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001772
Pavel Begunkov216578e2020-10-13 09:44:00 +01001773 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001774 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001775
Pavel Begunkov3893f392021-02-10 00:03:15 +00001776 kmem_cache_free(req_cachep, req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001777 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001778}
1779
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001780static inline void io_remove_next_linked(struct io_kiocb *req)
1781{
1782 struct io_kiocb *nxt = req->link;
1783
1784 req->link = nxt->link;
1785 nxt->link = NULL;
1786}
1787
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001788static bool io_kill_linked_timeout(struct io_kiocb *req)
1789 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001790{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001791 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001792
Pavel Begunkov900fad42020-10-19 16:39:16 +01001793 /*
1794 * Can happen if a linked timeout fired and the link chain looked like
1795 * req -> link t-out -> link t-out [-> ...]
1796 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001797 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1798 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001799
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001800 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001801 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001802 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001803 io_cqring_fill_event(link->ctx, link->user_data,
1804 -ECANCELED, 0);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001805 io_put_req_deferred(link, 1);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001806 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001807 }
1808 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001809 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001810}
1811
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001812static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001813 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001814{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001815 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001816
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001817 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001818 while (link) {
1819 nxt = link->link;
1820 link->link = NULL;
1821
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001822 trace_io_uring_fail_link(req, link);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001823 io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
Jens Axboe1575f212021-02-27 15:20:49 -07001824 io_put_req_deferred(link, 2);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001825 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001826 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001827}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001828
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001829static bool io_disarm_next(struct io_kiocb *req)
1830 __must_hold(&req->ctx->completion_lock)
1831{
1832 bool posted = false;
1833
1834 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1835 posted = io_kill_linked_timeout(req);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001836 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01001837 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001838 posted |= (req->link != NULL);
1839 io_fail_links(req);
1840 }
1841 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001842}
1843
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001844static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001845{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001846 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001847
Jens Axboe9e645e112019-05-10 16:07:28 -06001848 /*
1849 * If LINK is set, we have dependent requests in this chain. If we
1850 * didn't fail this request, queue the first one up, moving any other
1851 * dependencies to the next request. In case of failure, fail the rest
1852 * of the chain.
1853 */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001854 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001855 struct io_ring_ctx *ctx = req->ctx;
1856 unsigned long flags;
1857 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001858
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001859 spin_lock_irqsave(&ctx->completion_lock, flags);
1860 posted = io_disarm_next(req);
1861 if (posted)
1862 io_commit_cqring(req->ctx);
1863 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1864 if (posted)
1865 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001866 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001867 nxt = req->link;
1868 req->link = NULL;
1869 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001870}
Jens Axboe2665abf2019-11-05 12:40:47 -07001871
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001872static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001873{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001874 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001875 return NULL;
1876 return __io_req_find_next(req);
1877}
1878
Pavel Begunkov2c323952021-02-28 22:04:53 +00001879static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1880{
1881 if (!ctx)
1882 return;
1883 if (ctx->submit_state.comp.nr) {
1884 mutex_lock(&ctx->uring_lock);
1885 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
1886 mutex_unlock(&ctx->uring_lock);
1887 }
1888 percpu_ref_put(&ctx->refs);
1889}
1890
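/*
 * Run this task's pending task_work: splice the whole list off under
 * task_lock, then invoke each request's callback, flushing batched
 * completions and dropping ctx refs via ctx_flush_and_put() whenever the
 * ring changes.
 */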
Jens Axboe7cbf1722021-02-10 00:03:20 +00001891static bool __tctx_task_work(struct io_uring_task *tctx)
1892{
Jens Axboe65453d12021-02-10 00:03:21 +00001893 struct io_ring_ctx *ctx = NULL;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001894 struct io_wq_work_list list;
1895 struct io_wq_work_node *node;
1896
1897 if (wq_list_empty(&tctx->task_list))
1898 return false;
1899
Jens Axboe0b81e802021-02-16 10:33:53 -07001900 spin_lock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001901 list = tctx->task_list;
1902 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001903 spin_unlock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001904
1905 node = list.first;
1906 while (node) {
1907 struct io_wq_work_node *next = node->next;
1908 struct io_kiocb *req;
1909
1910 req = container_of(node, struct io_kiocb, io_task_work.node);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001911 if (req->ctx != ctx) {
1912 ctx_flush_and_put(ctx);
1913 ctx = req->ctx;
1914 percpu_ref_get(&ctx->refs);
1915 }
1916
Jens Axboe7cbf1722021-02-10 00:03:20 +00001917 req->task_work.func(&req->task_work);
1918 node = next;
Jens Axboe65453d12021-02-10 00:03:21 +00001919 }
1920
Pavel Begunkov2c323952021-02-28 22:04:53 +00001921 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001922 return list.first != NULL;
1923}
1924
1925static void tctx_task_work(struct callback_head *cb)
1926{
1927 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
1928
Jens Axboe1d5f3602021-02-26 14:54:16 -07001929 clear_bit(0, &tctx->task_state);
1930
Jens Axboe7cbf1722021-02-10 00:03:20 +00001931 while (__tctx_task_work(tctx))
1932 cond_resched();
Jens Axboe7cbf1722021-02-10 00:03:20 +00001933}
1934
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001935static int io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00001936{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001937 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001938 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001939 enum task_work_notify_mode notify;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001940 struct io_wq_work_node *node, *prev;
Jens Axboe0b81e802021-02-16 10:33:53 -07001941 unsigned long flags;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001942 int ret = 0;
1943
1944 if (unlikely(tsk->flags & PF_EXITING))
1945 return -ESRCH;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001946
1947 WARN_ON_ONCE(!tctx);
1948
Jens Axboe0b81e802021-02-16 10:33:53 -07001949 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001950 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001951 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001952
1953 /* task_work already pending, we're done */
1954 if (test_bit(0, &tctx->task_state) ||
1955 test_and_set_bit(0, &tctx->task_state))
1956 return 0;
1957
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001958 /*
1959 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1960 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1961 * processing task_work. There's no reliable way to tell if TWA_RESUME
1962 * will do the job.
1963 */
1964 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
1965
1966 if (!task_work_add(tsk, &tctx->task_work, notify)) {
1967 wake_up_process(tsk);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001968 return 0;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001969 }
Jens Axboe7cbf1722021-02-10 00:03:20 +00001970
1971 /*
1972 * Slow path - we failed; find and delete the work. If the work is not
1973 * in the list, it got run and we're fine.
1974 */
Jens Axboe0b81e802021-02-16 10:33:53 -07001975 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001976 wq_list_for_each(node, prev, &tctx->task_list) {
1977 if (&req->io_task_work.node == node) {
1978 wq_list_del(&tctx->task_list, node, prev);
1979 ret = 1;
1980 break;
1981 }
1982 }
Jens Axboe0b81e802021-02-16 10:33:53 -07001983 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001984 clear_bit(0, &tctx->task_state);
1985 return ret;
1986}
1987
Pavel Begunkov9b465712021-03-15 14:23:07 +00001988static bool io_run_task_work_head(struct callback_head **work_head)
1989{
1990 struct callback_head *work, *next;
1991 bool executed = false;
1992
1993 do {
1994 work = xchg(work_head, NULL);
1995 if (!work)
1996 break;
1997
1998 do {
1999 next = work->next;
2000 work->func(work);
2001 work = next;
2002 cond_resched();
2003 } while (work);
2004 executed = true;
2005 } while (1);
2006
2007 return executed;
2008}
2009
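/*
 * Lock-free push onto a singly linked list of fallback task_work; pairs
 * with the xchg(work_head, NULL) in io_run_task_work_head() above, which
 * grabs the whole list in one shot.
 */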
2010static void io_task_work_add_head(struct callback_head **work_head,
2011 struct callback_head *task_work)
2012{
2013 struct callback_head *head;
2014
2015 do {
2016 head = READ_ONCE(*work_head);
2017 task_work->next = head;
2018 } while (cmpxchg(work_head, head, task_work) != head);
2019}
2020
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002021static void io_req_task_work_add_fallback(struct io_kiocb *req,
Jens Axboe7cbf1722021-02-10 00:03:20 +00002022 task_work_func_t cb)
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002023{
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002024 init_task_work(&req->task_work, cb);
Pavel Begunkov9b465712021-03-15 14:23:07 +00002025 io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002026}
2027
Jens Axboec40f6372020-06-25 15:39:59 -06002028static void io_req_task_cancel(struct callback_head *cb)
2029{
2030 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002031 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002032
Pavel Begunkove83acd72021-02-28 22:35:09 +00002033 /* ctx is guaranteed to stay alive while we hold uring_lock */
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002034 mutex_lock(&ctx->uring_lock);
Pavel Begunkov25935532021-03-19 17:22:40 +00002035 io_req_complete_failed(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002036 mutex_unlock(&ctx->uring_lock);
Jens Axboec40f6372020-06-25 15:39:59 -06002037}
2038
2039static void __io_req_task_submit(struct io_kiocb *req)
2040{
2041 struct io_ring_ctx *ctx = req->ctx;
2042
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00002043 /* ctx stays valid until unlock, even if we drop all our ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002044 mutex_lock(&ctx->uring_lock);
Pavel Begunkov70aacfe2021-03-01 13:02:15 +00002045 if (!(current->flags & PF_EXITING) && !current->in_execve)
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002046 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002047 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002048 io_req_complete_failed(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002049 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002050}
2051
Jens Axboec40f6372020-06-25 15:39:59 -06002052static void io_req_task_submit(struct callback_head *cb)
2053{
2054 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2055
2056 __io_req_task_submit(req);
2057}
2058
Pavel Begunkova3df76982021-02-18 22:32:52 +00002059static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2060{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002061 req->result = ret;
2062 req->task_work.func = io_req_task_cancel;
2063
2064 if (unlikely(io_req_task_work_add(req)))
2065 io_req_task_work_add_fallback(req, io_req_task_cancel);
2066}
2067
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002068static void io_req_task_queue(struct io_kiocb *req)
2069{
2070 req->task_work.func = io_req_task_submit;
2071
2072 if (unlikely(io_req_task_work_add(req)))
2073 io_req_task_queue_fail(req, -ECANCELED);
2074}
2075
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002076static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002077{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002078 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002079
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002080 if (nxt)
2081 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002082}
2083
Jens Axboe9e645e112019-05-10 16:07:28 -06002084static void io_free_req(struct io_kiocb *req)
2085{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002086 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002087 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002088}
2089
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002090struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002091 struct task_struct *task;
2092 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002093 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002094};
2095
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002096static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002097{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002098 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002099 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002100 rb->task = NULL;
2101}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002102
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002103static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2104 struct req_batch *rb)
2105{
Pavel Begunkov6e833d52021-02-11 18:28:20 +00002106 if (rb->task)
Pavel Begunkov7c660732021-01-25 11:42:21 +00002107 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002108 if (rb->ctx_refs)
2109 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002110}
2111
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002112static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2113 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002114{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002115 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002116 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002117
Jens Axboee3bc8e92020-09-24 08:45:57 -06002118 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002119 if (rb->task)
2120 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002121 rb->task = req->task;
2122 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002123 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002124 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002125 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002126
Pavel Begunkovbd759042021-02-12 03:23:50 +00002127 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002128 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002129 else
2130 list_add(&req->compl.list, &state->comp.free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002131}
2132
Pavel Begunkov905c1722021-02-10 00:03:14 +00002133static void io_submit_flush_completions(struct io_comp_state *cs,
2134 struct io_ring_ctx *ctx)
2135{
2136 int i, nr = cs->nr;
2137 struct io_kiocb *req;
2138 struct req_batch rb;
2139
2140 io_init_req_batch(&rb);
2141 spin_lock_irq(&ctx->completion_lock);
2142 for (i = 0; i < nr; i++) {
2143 req = cs->reqs[i];
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002144 __io_cqring_fill_event(ctx, req->user_data, req->result,
2145 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002146 }
2147 io_commit_cqring(ctx);
2148 spin_unlock_irq(&ctx->completion_lock);
2149
2150 io_cqring_ev_posted(ctx);
2151 for (i = 0; i < nr; i++) {
2152 req = cs->reqs[i];
2153
2154 /* submission and completion refs */
Jens Axboede9b4cc2021-02-24 13:28:27 -07002155 if (req_ref_sub_and_test(req, 2))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002156 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002157 }
2158
2159 io_req_free_batch_finish(ctx, &rb);
2160 cs->nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002161}
2162
Jens Axboeba816ad2019-09-28 11:36:45 -06002163/*
2164 * Drop reference to request, return next in chain (if there is one) if this
2165 * was the last reference to this request.
2166 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002167static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002168{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002169 struct io_kiocb *nxt = NULL;
2170
Jens Axboede9b4cc2021-02-24 13:28:27 -07002171 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002172 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002173 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002174 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002175 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002176}
2177
Pavel Begunkov0d850352021-03-19 17:22:37 +00002178static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002179{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002180 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002181 io_free_req(req);
2182}
2183
Pavel Begunkov216578e2020-10-13 09:44:00 +01002184static void io_put_req_deferred_cb(struct callback_head *cb)
2185{
2186 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2187
2188 io_free_req(req);
2189}
2190
2191static void io_free_req_deferred(struct io_kiocb *req)
2192{
Jens Axboe7cbf1722021-02-10 00:03:20 +00002193 req->task_work.func = io_put_req_deferred_cb;
Pavel Begunkova05432f2021-03-19 17:22:38 +00002194 if (unlikely(io_req_task_work_add(req)))
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002195 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002196}
2197
2198static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2199{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002200 if (req_ref_sub_and_test(req, refs))
Pavel Begunkov216578e2020-10-13 09:44:00 +01002201 io_free_req_deferred(req);
2202}
2203
Pavel Begunkov6c503152021-01-04 20:36:36 +00002204static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002205{
2206 /* See comment at the top of this file */
2207 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002208 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002209}
2210
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002211static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2212{
2213 struct io_rings *rings = ctx->rings;
2214
2215 /* make sure SQ entry isn't read before tail */
2216 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2217}
2218
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002219static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002220{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002221 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002222
Jens Axboebcda7ba2020-02-23 16:42:51 -07002223 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2224 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002225 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002226 kfree(kbuf);
2227 return cflags;
2228}
2229
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002230static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2231{
2232 struct io_buffer *kbuf;
2233
2234 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2235 return io_put_kbuf(req, kbuf);
2236}
2237
Jens Axboe4c6e2772020-07-01 11:29:10 -06002238static inline bool io_run_task_work(void)
2239{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002240 /*
2241 * Not safe to run on exiting task, and the task_work handling will
2242 * not add work to such a task.
2243 */
2244 if (unlikely(current->flags & PF_EXITING))
2245 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002246 if (current->task_works) {
2247 __set_current_state(TASK_RUNNING);
2248 task_work_run();
2249 return true;
2250 }
2251
2252 return false;
2253}
2254
Jens Axboedef596e2019-01-09 08:59:42 -07002255/*
2256 * Find and free completed poll iocbs
2257 */
2258static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2259 struct list_head *done)
2260{
Jens Axboe8237e042019-12-28 10:48:22 -07002261 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002262 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002263
2264 /* order with ->result store in io_complete_rw_iopoll() */
2265 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002266
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002267 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002268 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002269 int cflags = 0;
2270
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002271 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002272 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002273
Pavel Begunkov8c130822021-03-22 01:58:32 +00002274 if (READ_ONCE(req->result) == -EAGAIN &&
2275 !(req->flags & REQ_F_DONT_REISSUE)) {
Pavel Begunkovf1613402021-02-11 18:28:21 +00002276 req->iopoll_completed = 0;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002277 req_ref_get(req);
2278 io_queue_async_work(req);
2279 continue;
Pavel Begunkovf1613402021-02-11 18:28:21 +00002280 }
2281
Jens Axboebcda7ba2020-02-23 16:42:51 -07002282 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002283 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002284
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002285 __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002286 (*nr_events)++;
2287
Jens Axboede9b4cc2021-02-24 13:28:27 -07002288 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002289 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002290 }
Jens Axboedef596e2019-01-09 08:59:42 -07002291
Jens Axboe09bb8392019-03-13 12:39:28 -06002292 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002293 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002294 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002295}
2296
Jens Axboedef596e2019-01-09 08:59:42 -07002297static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2298 long min)
2299{
2300 struct io_kiocb *req, *tmp;
2301 LIST_HEAD(done);
2302 bool spin;
2303 int ret;
2304
2305 /*
2306 * Only spin for completions if we don't have multiple devices hanging
2307 * off our complete list, and we're under the requested amount.
2308 */
2309 spin = !ctx->poll_multi_file && *nr_events < min;
2310
2311 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002312 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002313 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002314
2315 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002316 * Move completed and retryable entries to our local lists.
2317 * If we find a request that requires polling, break out
2318 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002319 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002320 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002321 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002322 continue;
2323 }
2324 if (!list_empty(&done))
2325 break;
2326
2327 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2328 if (ret < 0)
2329 break;
2330
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002331 /* iopoll may have completed current req */
2332 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002333 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002334
Jens Axboedef596e2019-01-09 08:59:42 -07002335 if (ret && spin)
2336 spin = false;
2337 ret = 0;
2338 }
2339
2340 if (!list_empty(&done))
2341 io_iopoll_complete(ctx, nr_events, &done);
2342
2343 return ret;
2344}
2345
2346/*
Jens Axboedef596e2019-01-09 08:59:42 -07002347 * We can't just wait for polled events to come to us; we have to actively
2348 * find and complete them.
2349 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002350static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002351{
2352 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2353 return;
2354
2355 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002356 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002357 unsigned int nr_events = 0;
2358
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002359 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002360
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002361  /* let it sleep and repeat later if we can't complete a request */
2362 if (nr_events == 0)
2363 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002364 /*
2366 * Ensure we allow local-to-the-cpu processing to take place;
2367 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002367 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002368 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002369 if (need_resched()) {
2370 mutex_unlock(&ctx->uring_lock);
2371 cond_resched();
2372 mutex_lock(&ctx->uring_lock);
2373 }
Jens Axboedef596e2019-01-09 08:59:42 -07002374 }
2375 mutex_unlock(&ctx->uring_lock);
2376}
2377
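/*
 * Reap IOPOLL completions for the submit/wait path: flush any CQ overflow,
 * bail early if CQEs are already pending, then run io_do_iopoll() under
 * uring_lock until at least 'min' events have been found, an error occurs,
 * or a reschedule is due. The lock is dropped while the poll list is empty
 * so punted submissions and task_work can make progress.
 */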
Pavel Begunkov7668b922020-07-07 16:36:21 +03002378static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002379{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002380 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002381 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002382
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002383 /*
2384 * We disallow the app entering submit/complete with polling, but we
2385 * still need to lock the ring to prevent racing with polled issue
2386 * that got punted to a workqueue.
2387 */
2388 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002389 /*
2390 * Don't enter poll loop if we already have events pending.
2391 * If we do, we can potentially be spinning for commands that
2392 * already triggered a CQE (e.g. in error).
2393 */
2394 if (test_bit(0, &ctx->cq_check_overflow))
2395 __io_cqring_overflow_flush(ctx, false);
2396 if (io_cqring_events(ctx))
2397 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002398 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002399 /*
2400 * If a submit got punted to a workqueue, we can have the
2401 * application entering polling for a command before it gets
2402 * issued. That app will hold the uring_lock for the duration
2403 * of the poll right here, so we need to take a breather every
2404 * now and then to ensure that the issue has a chance to add
2405 * the poll to the issued list. Otherwise we can spin here
2406 * forever, while the workqueue is stuck trying to acquire the
2407 * very same mutex.
2408 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002409 if (list_empty(&ctx->iopoll_list)) {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002410 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002411 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002412 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002413
2414 if (list_empty(&ctx->iopoll_list))
2415 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002416 }
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002417 ret = io_do_iopoll(ctx, &nr_events, min);
2418 } while (!ret && nr_events < min && !need_resched());
2419out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002420 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002421 return ret;
2422}
2423
Jens Axboe491381ce2019-10-17 09:20:46 -06002424static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002425{
Jens Axboe491381ce2019-10-17 09:20:46 -06002426 /*
2427 * Tell lockdep we inherited freeze protection from submission
2428 * thread.
2429 */
2430 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002431 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002432
Pavel Begunkov1c986792021-03-22 01:58:31 +00002433 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2434 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002435 }
2436}
2437
Jens Axboeb63534c2020-06-04 11:28:00 -06002438#ifdef CONFIG_BLOCK
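/*
 * Prepare a request for reissue after a failed (-EAGAIN) attempt: set up
 * async data if it isn't there yet, otherwise rewind the iterator that the
 * failed attempt may have advanced.
 */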
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002439static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002440{
Pavel Begunkovab454432021-03-22 01:58:33 +00002441 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002442
Pavel Begunkovab454432021-03-22 01:58:33 +00002443 if (!rw)
2444 return !io_req_prep_async(req);
2445 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2446 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2447 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002448}
Jens Axboeb63534c2020-06-04 11:28:00 -06002449
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002450static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002451{
Jens Axboe355afae2020-09-02 09:30:31 -06002452 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002453 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002454
Jens Axboe355afae2020-09-02 09:30:31 -06002455 if (!S_ISBLK(mode) && !S_ISREG(mode))
2456 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002457 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2458 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002459 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002460 /*
2461 * If ref is dying, we might be running poll reap from the exit work.
2462 * Don't attempt to reissue from that path, just let it fail with
2463 * -EAGAIN.
2464 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002465 if (percpu_ref_is_dying(&ctx->refs))
2466 return false;
2467 return true;
2468}
Jens Axboee82ad482021-04-02 19:45:34 -06002469#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002470static bool io_resubmit_prep(struct io_kiocb *req)
2471{
2472 return false;
2473}
Jens Axboee82ad482021-04-02 19:45:34 -06002474static bool io_rw_should_reissue(struct io_kiocb *req)
2475{
2476 return false;
2477}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002478#endif
2479
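/*
 * Common completion for non-IOPOLL reads/writes: release freeze protection
 * for writes, mark the request for reissue if -EAGAIN/-EOPNOTSUPP can be
 * retried, flag failure on other unexpected results, and post the CQE
 * (including any selected-buffer cflags).
 */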
Jens Axboea1d7c392020-06-22 11:09:46 -06002480static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002481 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002482{
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002483 int cflags = 0;
2484
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002485 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2486 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002487 if (res != req->result) {
2488 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2489 io_rw_should_reissue(req)) {
2490 req->flags |= REQ_F_REISSUE;
2491 return;
2492 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002493 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002494 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002495 if (req->flags & REQ_F_BUFFER_SELECTED)
2496 cflags = io_put_rw_kbuf(req);
2497 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002498}
2499
2500static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2501{
Jens Axboe9adbd452019-12-20 08:45:55 -07002502 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002503
Pavel Begunkov889fca72021-02-10 00:03:09 +00002504 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002505}
2506
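/*
 * IOPOLL completion callback: stash the result and set ->iopoll_completed so
 * the polling side can reap this request; the smp_wmb() pairs with the
 * smp_rmb() in io_iopoll_complete(). Requests that can't be reissued on an
 * unexpected result are marked failed and barred from reissue.
 */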
Jens Axboedef596e2019-01-09 08:59:42 -07002507static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2508{
Jens Axboe9adbd452019-12-20 08:45:55 -07002509 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002510
Jens Axboe491381ce2019-10-17 09:20:46 -06002511 if (kiocb->ki_flags & IOCB_WRITE)
2512 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002513 if (unlikely(res != req->result)) {
Jens Axboea1ff1e32021-04-12 06:40:02 -06002514 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2515 io_resubmit_prep(req))) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002516 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002517 req->flags |= REQ_F_DONT_REISSUE;
2518 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002519 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002520
2521 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002522 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002523 smp_wmb();
2524 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002525}
2526
2527/*
2528 * After the iocb has been issued, it's safe to be found on the poll list.
2529 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002530 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002531 * accessing the kiocb cookie.
2532 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002533static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002534{
2535 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002536 const bool in_async = io_wq_current_is_worker();
2537
2538 /* workqueue context doesn't hold uring_lock, grab it now */
2539 if (unlikely(in_async))
2540 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002541
2542 /*
2543 * Track whether we have multiple files in our lists. This will impact
2544 * how we do polling eventually, not spinning if we're on potentially
2545 * different devices.
2546 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002547 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002548 ctx->poll_multi_file = false;
2549 } else if (!ctx->poll_multi_file) {
2550 struct io_kiocb *list_req;
2551
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002552 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002553 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002554 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002555 ctx->poll_multi_file = true;
2556 }
2557
2558 /*
2559 * For fast devices, IO may have already completed. If it has, add
2560 * it to the front so we find it first.
2561 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002562 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002563 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002564 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002565 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002566
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002567 if (unlikely(in_async)) {
2568 /*
2569 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2570 * in sq thread task context or in io worker task context. If
2571 * the current task context is the sq thread, we don't need to
2572 * check whether we should wake up the sq thread.
2573 */
2574 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2575 wq_has_sleeper(&ctx->sq_data->wait))
2576 wake_up(&ctx->sq_data->wait);
2577
2578 mutex_unlock(&ctx->uring_lock);
2579 }
Jens Axboedef596e2019-01-09 08:59:42 -07002580}
2581
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002582static inline void io_state_file_put(struct io_submit_state *state)
2583{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002584 if (state->file_refs) {
2585 fput_many(state->file, state->file_refs);
2586 state->file_refs = 0;
2587 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002588}
2589
2590/*
2591 * Get as many references to a file as we have IOs left in this submission,
2592 * assuming most submissions are for one file, or at least that each file
2593 * has more than one submission.
2594 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002595static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002596{
2597 if (!state)
2598 return fget(fd);
2599
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002600 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002601 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002602 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002603 return state->file;
2604 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002605 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002606 }
2607 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002608 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002609 return NULL;
2610
2611 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002612 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002613 return state->file;
2614}
2615
Jens Axboe4503b762020-06-01 10:00:27 -06002616static bool io_bdev_nowait(struct block_device *bdev)
2617{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002618 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002619}
2620
Jens Axboe2b188cc2019-01-07 10:46:33 -07002621/*
2622 * If we tracked the file through the SCM inflight mechanism, we could support
2623 * any file. For now, just ensure that anything potentially problematic is done
2624 * inline.
2625 */
Jens Axboe7b29f922021-03-12 08:30:14 -07002626static bool __io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002627{
2628 umode_t mode = file_inode(file)->i_mode;
2629
Jens Axboe4503b762020-06-01 10:00:27 -06002630 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002631 if (IS_ENABLED(CONFIG_BLOCK) &&
2632 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002633 return true;
2634 return false;
2635 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002636 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002637 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002638 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002639 if (IS_ENABLED(CONFIG_BLOCK) &&
2640 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002641 file->f_op != &io_uring_fops)
2642 return true;
2643 return false;
2644 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002645
Jens Axboec5b85622020-06-09 19:23:05 -06002646 /* any ->read/write should understand O_NONBLOCK */
2647 if (file->f_flags & O_NONBLOCK)
2648 return true;
2649
Jens Axboeaf197f52020-04-28 13:15:06 -06002650 if (!(file->f_mode & FMODE_NOWAIT))
2651 return false;
2652
2653 if (rw == READ)
2654 return file->f_op->read_iter != NULL;
2655
2656 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002657}
2658
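/* Fast path: consult the cached REQ_F_ASYNC_READ/WRITE flags first */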
Jens Axboe7b29f922021-03-12 08:30:14 -07002659static bool io_file_supports_async(struct io_kiocb *req, int rw)
2660{
2661 if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
2662 return true;
2663 else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
2664 return true;
2665
2666 return __io_file_supports_async(req->file, rw);
2667}
2668
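/*
 * Shared prep for read/write SQEs: set up the kiocb (position, flags,
 * ioprio), honour O_NONBLOCK/RWF_NOWAIT, pick the completion callback based
 * on IORING_SETUP_IOPOLL, and record the user address, length and buffer
 * index from the SQE.
 */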
Pavel Begunkova88fc402020-09-30 22:57:53 +03002669static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002670{
Jens Axboedef596e2019-01-09 08:59:42 -07002671 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002672 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002673 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002674 unsigned ioprio;
2675 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002676
Jens Axboe7b29f922021-03-12 08:30:14 -07002677 if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002678 req->flags |= REQ_F_ISREG;
2679
Jens Axboe2b188cc2019-01-07 10:46:33 -07002680 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002681 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002682 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002683 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002684 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002685 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002686 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2687 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2688 if (unlikely(ret))
2689 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002690
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002691 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2692 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2693 req->flags |= REQ_F_NOWAIT;
2694
Jens Axboe2b188cc2019-01-07 10:46:33 -07002695 ioprio = READ_ONCE(sqe->ioprio);
2696 if (ioprio) {
2697 ret = ioprio_check_cap(ioprio);
2698 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002699 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002700
2701 kiocb->ki_ioprio = ioprio;
2702 } else
2703 kiocb->ki_ioprio = get_current_ioprio();
2704
Jens Axboedef596e2019-01-09 08:59:42 -07002705 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002706 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2707 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002708 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002709
Jens Axboedef596e2019-01-09 08:59:42 -07002710 kiocb->ki_flags |= IOCB_HIPRI;
2711 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002712 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002713 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002714 if (kiocb->ki_flags & IOCB_HIPRI)
2715 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002716 kiocb->ki_complete = io_complete_rw;
2717 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002718
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002719 if (req->opcode == IORING_OP_READ_FIXED ||
2720 req->opcode == IORING_OP_WRITE_FIXED) {
2721 req->imu = NULL;
2722 io_req_set_rsrc_node(req);
2723 }
2724
Jens Axboe3529d8c2019-12-19 18:24:38 -07002725 req->rw.addr = READ_ONCE(sqe->addr);
2726 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002727 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002728 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002729}
2730
2731static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2732{
2733 switch (ret) {
2734 case -EIOCBQUEUED:
2735 break;
2736 case -ERESTARTSYS:
2737 case -ERESTARTNOINTR:
2738 case -ERESTARTNOHAND:
2739 case -ERESTART_RESTARTBLOCK:
2740 /*
2741 * We can't just restart the syscall, since previously
2742 * submitted sqes may already be in progress. Just fail this
2743 * IO with EINTR.
2744 */
2745 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002746 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002747 default:
2748 kiocb->ki_complete(kiocb, ret, 0);
2749 }
2750}
2751
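/*
 * Finish off a read/write attempt: fold in bytes completed by an earlier
 * partial attempt, update f_pos for REQ_F_CUR_POS requests, then complete
 * the request (or defer to io_rw_done()). A pending REQ_F_REISSUE is
 * resolved here by requeueing async work or failing the request.
 */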
Jens Axboea1d7c392020-06-22 11:09:46 -06002752static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002753 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002754{
Jens Axboeba042912019-12-25 16:33:42 -07002755 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002756 struct io_async_rw *io = req->async_data;
Pavel Begunkov97284632021-04-08 19:28:03 +01002757 bool check_reissue = kiocb->ki_complete == io_complete_rw;
Jens Axboeba042912019-12-25 16:33:42 -07002758
Jens Axboe227c0c92020-08-13 11:51:40 -06002759 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002760 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002761 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002762 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002763 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002764 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002765 }
2766
Jens Axboeba042912019-12-25 16:33:42 -07002767 if (req->flags & REQ_F_CUR_POS)
2768 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002769 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002770 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002771 else
2772 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002773
2774 if (check_reissue && req->flags & REQ_F_REISSUE) {
2775 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06002776 if (io_resubmit_prep(req)) {
Pavel Begunkov8c130822021-03-22 01:58:32 +00002777 req_ref_get(req);
2778 io_queue_async_work(req);
2779 } else {
Pavel Begunkov97284632021-04-08 19:28:03 +01002780 int cflags = 0;
2781
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002782 req_set_fail(req);
Pavel Begunkov97284632021-04-08 19:28:03 +01002783 if (req->flags & REQ_F_BUFFER_SELECTED)
2784 cflags = io_put_rw_kbuf(req);
2785 __io_req_complete(req, issue_flags, ret, cflags);
2786 }
2787 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002788}
2789
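/*
 * Set up a BVEC iterator over a registered (fixed) buffer: verify that the
 * requested range lies inside the mapped region, then position the iterator
 * at the right offset without paying the cost of iov_iter_advance() for
 * large buffers.
 */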
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002790static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2791 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07002792{
Jens Axboe9adbd452019-12-20 08:45:55 -07002793 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01002794 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002795 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07002796
Pavel Begunkov75769e32021-04-01 15:43:54 +01002797 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07002798 return -EFAULT;
2799 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01002800 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07002801 return -EFAULT;
2802
2803 /*
2804 * May not be the start of the buffer; set the size appropriately
2805 * and advance us to the beginning.
2806 */
2807 offset = buf_addr - imu->ubuf;
2808 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002809
2810 if (offset) {
2811 /*
2812 * Don't use iov_iter_advance() here, as it's really slow for
2813 * using the latter parts of a big fixed buffer - it iterates
2814 * over each segment manually. We can cheat a bit here, because
2815 * we know that:
2816 *
2817 * 1) it's a BVEC iter, we set it up
2818 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2819 * first and last bvec
2820 *
2821 * So just find our index, and adjust the iterator afterwards.
2822 * If the offset is within the first bvec (or is the whole
2823 * first bvec), just use iov_iter_advance(). This makes it easier
2824 * since we can just skip the first segment, which may not
2825 * be PAGE_SIZE aligned.
2826 */
2827 const struct bio_vec *bvec = imu->bvec;
2828
2829 if (offset <= bvec->bv_len) {
2830 iov_iter_advance(iter, offset);
2831 } else {
2832 unsigned long seg_skip;
2833
2834 /* skip first vec */
2835 offset -= bvec->bv_len;
2836 seg_skip = 1 + (offset >> PAGE_SHIFT);
2837
2838 iter->bvec = bvec + seg_skip;
2839 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002840 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002841 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002842 }
2843 }
2844
Pavel Begunkov847595d2021-02-04 13:52:06 +00002845 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002846}
2847
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002848static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2849{
2850 struct io_ring_ctx *ctx = req->ctx;
2851 struct io_mapped_ubuf *imu = req->imu;
2852 u16 index, buf_index = req->buf_index;
2853
2854 if (likely(!imu)) {
2855 if (unlikely(buf_index >= ctx->nr_user_bufs))
2856 return -EFAULT;
2857 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2858 imu = READ_ONCE(ctx->user_bufs[index]);
2859 req->imu = imu;
2860 }
2861 return __io_import_fixed(req, rw, iter, imu);
2862}
2863
Jens Axboebcda7ba2020-02-23 16:42:51 -07002864static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2865{
2866 if (needs_lock)
2867 mutex_unlock(&ctx->uring_lock);
2868}
2869
2870static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2871{
2872 /*
2873 * "Normal" inline submissions always hold the uring_lock, since we
2874 * grab it from the system call. Same is true for the SQPOLL offload.
2875 * The only exception is when we've detached the request and issue it
2876 * from an async worker thread; grab the lock in that case.
2877 */
2878 if (needs_lock)
2879 mutex_lock(&ctx->uring_lock);
2880}
2881
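/*
 * Pick a provided buffer from group 'bgid' under uring_lock: reuse an
 * already-selected buffer if there is one, otherwise pop an entry from the
 * group (clamping *len to the buffer size). Returns ERR_PTR(-ENOBUFS) if
 * the group is empty.
 */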
2882static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2883 int bgid, struct io_buffer *kbuf,
2884 bool needs_lock)
2885{
2886 struct io_buffer *head;
2887
2888 if (req->flags & REQ_F_BUFFER_SELECTED)
2889 return kbuf;
2890
2891 io_ring_submit_lock(req->ctx, needs_lock);
2892
2893 lockdep_assert_held(&req->ctx->uring_lock);
2894
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002895 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002896 if (head) {
2897 if (!list_empty(&head->list)) {
2898 kbuf = list_last_entry(&head->list, struct io_buffer,
2899 list);
2900 list_del(&kbuf->list);
2901 } else {
2902 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002903 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002904 }
2905 if (*len > kbuf->len)
2906 *len = kbuf->len;
2907 } else {
2908 kbuf = ERR_PTR(-ENOBUFS);
2909 }
2910
2911 io_ring_submit_unlock(req->ctx, needs_lock);
2912
2913 return kbuf;
2914}
2915
Jens Axboe4d954c22020-02-27 07:31:19 -07002916static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2917 bool needs_lock)
2918{
2919 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002920 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002921
2922 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002923 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002924 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2925 if (IS_ERR(kbuf))
2926 return kbuf;
2927 req->rw.addr = (u64) (unsigned long) kbuf;
2928 req->flags |= REQ_F_BUFFER_SELECTED;
2929 return u64_to_user_ptr(kbuf->addr);
2930}
2931
2932#ifdef CONFIG_COMPAT
2933static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2934 bool needs_lock)
2935{
2936 struct compat_iovec __user *uiov;
2937 compat_ssize_t clen;
2938 void __user *buf;
2939 ssize_t len;
2940
2941 uiov = u64_to_user_ptr(req->rw.addr);
2942 if (!access_ok(uiov, sizeof(*uiov)))
2943 return -EFAULT;
2944 if (__get_user(clen, &uiov->iov_len))
2945 return -EFAULT;
2946 if (clen < 0)
2947 return -EINVAL;
2948
2949 len = clen;
2950 buf = io_rw_buffer_select(req, &len, needs_lock);
2951 if (IS_ERR(buf))
2952 return PTR_ERR(buf);
2953 iov[0].iov_base = buf;
2954 iov[0].iov_len = (compat_size_t) len;
2955 return 0;
2956}
2957#endif
2958
2959static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2960 bool needs_lock)
2961{
2962 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2963 void __user *buf;
2964 ssize_t len;
2965
2966 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2967 return -EFAULT;
2968
2969 len = iov[0].iov_len;
2970 if (len < 0)
2971 return -EINVAL;
2972 buf = io_rw_buffer_select(req, &len, needs_lock);
2973 if (IS_ERR(buf))
2974 return PTR_ERR(buf);
2975 iov[0].iov_base = buf;
2976 iov[0].iov_len = len;
2977 return 0;
2978}
2979
2980static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2981 bool needs_lock)
2982{
Jens Axboedddb3e22020-06-04 11:27:01 -06002983 if (req->flags & REQ_F_BUFFER_SELECTED) {
2984 struct io_buffer *kbuf;
2985
2986 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2987 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2988 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07002989 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06002990 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00002991 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07002992 return -EINVAL;
2993
2994#ifdef CONFIG_COMPAT
2995 if (req->ctx->compat)
2996 return io_compat_import(req, iov, needs_lock);
2997#endif
2998
2999 return __io_iov_buffer_select(req, iov, needs_lock);
3000}
3001
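/*
 * Turn the SQE's buffer description into an iov_iter: fixed buffers go
 * through io_import_fixed(), IORING_OP_READ/WRITE use a single range
 * (optionally picked via buffer select), everything else is imported as a
 * user iovec array.
 */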
Pavel Begunkov847595d2021-02-04 13:52:06 +00003002static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3003 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003004{
Jens Axboe9adbd452019-12-20 08:45:55 -07003005 void __user *buf = u64_to_user_ptr(req->rw.addr);
3006 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003007 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003008 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003009
Pavel Begunkov7d009162019-11-25 23:14:40 +03003010 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003011 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003012 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003013 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003014
Jens Axboebcda7ba2020-02-23 16:42:51 -07003015 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003016 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003017 return -EINVAL;
3018
Jens Axboe3a6820f2019-12-22 15:19:35 -07003019 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003020 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003021 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003022 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003023 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003024 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003025 }
3026
Jens Axboe3a6820f2019-12-22 15:19:35 -07003027 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3028 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003029 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003030 }
3031
Jens Axboe4d954c22020-02-27 07:31:19 -07003032 if (req->flags & REQ_F_BUFFER_SELECT) {
3033 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003034 if (!ret)
3035 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003036 *iovec = NULL;
3037 return ret;
3038 }
3039
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003040 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3041 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003042}
3043
Jens Axboe0fef9482020-08-26 10:36:20 -06003044static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3045{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003046 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003047}
3048
Jens Axboe32960612019-09-23 11:05:34 -06003049/*
3050 * For files that don't have ->read_iter() and ->write_iter(), handle them
3051 * by looping over ->read() or ->write() manually.
3052 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003053static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003054{
Jens Axboe4017eb92020-10-22 14:14:12 -06003055 struct kiocb *kiocb = &req->rw.kiocb;
3056 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003057 ssize_t ret = 0;
3058
3059 /*
3060 * Don't support polled IO through this interface, and we can't
3061 * support non-blocking either. For the latter, this just causes
3062 * the kiocb to be handled from an async context.
3063 */
3064 if (kiocb->ki_flags & IOCB_HIPRI)
3065 return -EOPNOTSUPP;
3066 if (kiocb->ki_flags & IOCB_NOWAIT)
3067 return -EAGAIN;
3068
3069 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003070 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003071 ssize_t nr;
3072
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003073 if (!iov_iter_is_bvec(iter)) {
3074 iovec = iov_iter_iovec(iter);
3075 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003076 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3077 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003078 }
3079
Jens Axboe32960612019-09-23 11:05:34 -06003080 if (rw == READ) {
3081 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003082 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003083 } else {
3084 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003085 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003086 }
3087
3088 if (nr < 0) {
3089 if (!ret)
3090 ret = nr;
3091 break;
3092 }
3093 ret += nr;
3094 if (nr != iovec.iov_len)
3095 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003096 req->rw.len -= nr;
3097 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003098 iov_iter_advance(iter, nr);
3099 }
3100
3101 return ret;
3102}
3103
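/*
 * Copy the current iovec/iterator into the request's async data so the IO
 * can be retried later: inline iovecs land in ->fast_iov, a heap-allocated
 * iovec is kept in ->free_iovec and flagged for cleanup.
 */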
Jens Axboeff6165b2020-08-13 09:47:43 -06003104static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3105 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003106{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003107 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003108
Jens Axboeff6165b2020-08-13 09:47:43 -06003109 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003110 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003111 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003112 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003113 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003114 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003115 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003116 unsigned iov_off = 0;
3117
3118 rw->iter.iov = rw->fast_iov;
3119 if (iter->iov != fast_iov) {
3120 iov_off = iter->iov - fast_iov;
3121 rw->iter.iov += iov_off;
3122 }
3123 if (rw->fast_iov != fast_iov)
3124 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003125 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003126 } else {
3127 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003128 }
3129}
3130
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003131static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003132{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003133 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3134 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3135 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003136}
3137
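/*
 * Allocate async data if needed (the opcode requires async setup, or 'force'
 * is set) and map the iovec/iterator into it; frees the iovec and returns
 * -ENOMEM if allocation fails.
 */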
Jens Axboeff6165b2020-08-13 09:47:43 -06003138static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3139 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003140 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003141{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003142 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003143 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003144 if (!req->async_data) {
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003145 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003146 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003147 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003148 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003149
Jens Axboeff6165b2020-08-13 09:47:43 -06003150 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003151 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003152 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003153}
3154
Pavel Begunkov73debe62020-09-30 22:57:54 +03003155static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003156{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003157 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003158 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003159 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003160
Pavel Begunkov2846c482020-11-07 13:16:27 +00003161 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003162 if (unlikely(ret < 0))
3163 return ret;
3164
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003165 iorw->bytes_done = 0;
3166 iorw->free_iovec = iov;
3167 if (iov)
3168 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003169 return 0;
3170}
3171
Pavel Begunkov73debe62020-09-30 22:57:54 +03003172static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003173{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003174 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3175 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003176 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003177}
3178
Jens Axboec1dd91d2020-08-03 16:43:59 -06003179/*
3180 * This is our waitqueue callback handler, registered through lock_page_async()
3181 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3182 * This gets called when the page is unlocked, and we generally expect that to
3183 * happen when the page IO is completed and the page is now uptodate. This will
3184 * queue a task_work based retry of the operation, attempting to copy the data
3185 * again. If the latter fails because the page was NOT uptodate, then we will
3186 * do a thread based blocking retry of the operation. That's the unexpected
3187 * slow path.
3188 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003189static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3190 int sync, void *arg)
3191{
3192 struct wait_page_queue *wpq;
3193 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003194 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003195
3196 wpq = container_of(wait, struct wait_page_queue, wait);
3197
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003198 if (!wake_page_match(wpq, key))
3199 return 0;
3200
Hao Xuc8d317a2020-09-29 20:00:45 +08003201 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003202 list_del_init(&wait->entry);
3203
Jens Axboebcf5a062020-05-22 09:24:42 -06003204 /* submit ref gets dropped, acquire a new one */
Jens Axboede9b4cc2021-02-24 13:28:27 -07003205 req_ref_get(req);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003206 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003207 return 1;
3208}
3209
Jens Axboec1dd91d2020-08-03 16:43:59 -06003210/*
3211 * This controls whether a given IO request should be armed for async page
3212 * based retry. If we return false here, the request is handed to the async
3213 * worker threads for retry. If we're doing buffered reads on a regular file,
3214 * we prepare a private wait_page_queue entry and retry the operation. This
3215 * will either succeed because the page is now uptodate and unlocked, or it
3216 * will register a callback when the page is unlocked at IO completion. Through
3217 * that callback, io_uring uses task_work to set up a retry of the operation.
3218 * That retry will attempt the buffered read again. The retry will generally
3219 * succeed, or in rare cases where it fails, we then fall back to using the
3220 * async worker threads for a blocking retry.
3221 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003222static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003223{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003224 struct io_async_rw *rw = req->async_data;
3225 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003226 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003227
3228 /* never retry for NOWAIT, we just complete with -EAGAIN */
3229 if (req->flags & REQ_F_NOWAIT)
3230 return false;
3231
Jens Axboe227c0c92020-08-13 11:51:40 -06003232 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003233 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003234 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003235
Jens Axboebcf5a062020-05-22 09:24:42 -06003236 /*
3237 * just use poll if we can, and don't attempt if the fs doesn't
3238 * support callback based unlocks
3239 */
3240 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3241 return false;
3242
Jens Axboe3b2a4432020-08-16 10:58:43 -07003243 wait->wait.func = io_async_buf_func;
3244 wait->wait.private = req;
3245 wait->wait.flags = 0;
3246 INIT_LIST_HEAD(&wait->wait.entry);
3247 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003248 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003249 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003250 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003251}
3252
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003253static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003254{
3255 if (req->file->f_op->read_iter)
3256 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003257 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003258 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003259 else
3260 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003261}
3262
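/*
 * Issue a read: import the iovec (or reuse persistent async state), punt to
 * async context if a non-blocking attempt isn't possible, and on -EAGAIN or
 * a short read either complete, punt, or arm a page-unlock based retry via
 * io_rw_should_retry().
 */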
Pavel Begunkov889fca72021-02-10 00:03:09 +00003263static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003264{
3265 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003266 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003267 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003268 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003269 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003270 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003271
Pavel Begunkov2846c482020-11-07 13:16:27 +00003272 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003273 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003274 iovec = NULL;
3275 } else {
3276 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3277 if (ret < 0)
3278 return ret;
3279 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003280 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003281 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003282
Jens Axboefd6c2e42019-12-18 12:19:41 -07003283 /* Ensure we clear previously set non-block flag */
3284 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003285 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003286 else
3287 kiocb->ki_flags |= IOCB_NOWAIT;
3288
Pavel Begunkov24c74672020-06-21 13:09:51 +03003289 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003290 if (force_nonblock && !io_file_supports_async(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003291 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003292 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003293 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003294
Pavel Begunkov632546c2020-11-07 13:16:26 +00003295 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003296 if (unlikely(ret)) {
3297 kfree(iovec);
3298 return ret;
3299 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003300
Jens Axboe227c0c92020-08-13 11:51:40 -06003301 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003302
Jens Axboe230d50d2021-04-01 20:41:15 -06003303 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003304 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003305 /* IOPOLL retry should happen for io-wq threads */
3306 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003307 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003308 /* no retry on NONBLOCK nor RWF_NOWAIT */
3309 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003310 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003311 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003312 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003313 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003314 } else if (ret == -EIOCBQUEUED) {
3315 goto out_free;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003316 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003317 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003318 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003319 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003320 }
3321
Jens Axboe227c0c92020-08-13 11:51:40 -06003322 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003323 if (ret2)
3324 return ret2;
3325
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003326 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003327 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003328 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003329 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003330
Pavel Begunkovb23df912021-02-04 13:52:04 +00003331 do {
3332 io_size -= ret;
3333 rw->bytes_done += ret;
3334 /* if we can retry, do so with the callbacks armed */
3335 if (!io_rw_should_retry(req)) {
3336 kiocb->ki_flags &= ~IOCB_WAITQ;
3337 return -EAGAIN;
3338 }
3339
3340 /*
3341 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3342 * we get -EIOCBQUEUED, then we'll get a notification when the
3343 * desired page gets unlocked. We can also get a partial read
3344 * here, and if we do, then just retry at the new offset.
3345 */
3346 ret = io_iter_do_read(req, iter);
3347 if (ret == -EIOCBQUEUED)
3348 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003349 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003350 kiocb->ki_flags &= ~IOCB_WAITQ;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003351 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003352done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003353 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003354out_free:
3355 /* it's faster to check for NULL here than to delegate it to kfree() */
3356 if (iovec)
3357 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003358 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003359}
3360
Pavel Begunkov73debe62020-09-30 22:57:54 +03003361static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003362{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003363 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3364 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003365 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003366}
3367
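/*
 * Issue a write: mirrors io_read(), but also grabs sb freeze protection for
 * regular files (released later in io_complete_rw()) and falls back to
 * copying the iovec and punting when a non-blocking attempt can't proceed.
 */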
Pavel Begunkov889fca72021-02-10 00:03:09 +00003368static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003369{
3370 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003371 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003372 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003373 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003374 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003375 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003376
Pavel Begunkov2846c482020-11-07 13:16:27 +00003377 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003378 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003379 iovec = NULL;
3380 } else {
3381 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3382 if (ret < 0)
3383 return ret;
3384 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003385 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003386 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003387
Jens Axboefd6c2e42019-12-18 12:19:41 -07003388 /* Ensure we clear previously set non-block flag */
3389 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003390 kiocb->ki_flags &= ~IOCB_NOWAIT;
3391 else
3392 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003393
Pavel Begunkov24c74672020-06-21 13:09:51 +03003394 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003395 if (force_nonblock && !io_file_supports_async(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003396 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003397
Jens Axboe10d59342019-12-09 20:16:22 -07003398 /* file path doesn't support NOWAIT for non-direct_IO */
3399 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3400 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003401 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003402
Pavel Begunkov632546c2020-11-07 13:16:26 +00003403 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003404 if (unlikely(ret))
3405 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003406
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003407 /*
3408 * Open-code file_start_write here to grab freeze protection,
3409 * which will be released by another thread in
3410 * io_complete_rw(). Fool lockdep by telling it the lock got
3411 * released so that it doesn't complain about the held lock when
3412 * we return to userspace.
3413 */
3414 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003415 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003416 __sb_writers_release(file_inode(req->file)->i_sb,
3417 SB_FREEZE_WRITE);
3418 }
3419 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003420
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003421 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003422 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003423 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003424 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003425 else
3426 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003427
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003428 if (req->flags & REQ_F_REISSUE) {
3429 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003430 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003431 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003432
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003433 /*
3434 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3435 * retry them without IOCB_NOWAIT.
3436 */
3437 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3438 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003439 /* no retry on NONBLOCK nor RWF_NOWAIT */
3440 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003441 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003442 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003443 /* IOPOLL retry should happen for io-wq threads */
3444 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3445 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003446done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003447 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003448 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003449copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003450 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003451 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003452 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003453 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003454 }
Jens Axboe31b51512019-01-18 22:56:34 -07003455out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003456 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003457 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003458 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003459 return ret;
3460}
3461
Jens Axboe80a261f2020-09-28 14:23:58 -06003462static int io_renameat_prep(struct io_kiocb *req,
3463 const struct io_uring_sqe *sqe)
3464{
3465 struct io_rename *ren = &req->rename;
3466 const char __user *oldf, *newf;
3467
3468 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3469 return -EBADF;
3470
3471 ren->old_dfd = READ_ONCE(sqe->fd);
3472 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3473 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3474 ren->new_dfd = READ_ONCE(sqe->len);
3475 ren->flags = READ_ONCE(sqe->rename_flags);
3476
3477 ren->oldpath = getname(oldf);
3478 if (IS_ERR(ren->oldpath))
3479 return PTR_ERR(ren->oldpath);
3480
3481 ren->newpath = getname(newf);
3482 if (IS_ERR(ren->newpath)) {
3483 putname(ren->oldpath);
3484 return PTR_ERR(ren->newpath);
3485 }
3486
3487 req->flags |= REQ_F_NEED_CLEANUP;
3488 return 0;
3489}
3490
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003491static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003492{
3493 struct io_rename *ren = &req->rename;
3494 int ret;
3495
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003496 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003497 return -EAGAIN;
3498
3499 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3500 ren->newpath, ren->flags);
3501
3502 req->flags &= ~REQ_F_NEED_CLEANUP;
3503 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003504 req_set_fail(req);
Jens Axboe80a261f2020-09-28 14:23:58 -06003505 io_req_complete(req, ret);
3506 return 0;
3507}
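/*
 * Userspace mapping for the rename handlers above: io_renameat_prep() takes
 * the old dirfd from sqe->fd, the old path from sqe->addr, the new dirfd
 * from sqe->len, the new path from sqe->addr2 and the flags from
 * sqe->rename_flags. An illustrative sketch ('ring' is an already
 * initialised struct io_uring), assuming a liburing build that provides the
 * io_uring_prep_renameat() helper:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt",
 *			       AT_FDCWD, "new.txt", RENAME_NOREPLACE);
 *	io_uring_submit(&ring);
 *
 * The rename itself always runs from a blocking context; nonblocking issue
 * returns -EAGAIN and the request is punted to io-wq.
 */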
3508
Jens Axboe14a11432020-09-28 14:27:37 -06003509static int io_unlinkat_prep(struct io_kiocb *req,
3510 const struct io_uring_sqe *sqe)
3511{
3512 struct io_unlink *un = &req->unlink;
3513 const char __user *fname;
3514
3515 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3516 return -EBADF;
3517
3518 un->dfd = READ_ONCE(sqe->fd);
3519
3520 un->flags = READ_ONCE(sqe->unlink_flags);
3521 if (un->flags & ~AT_REMOVEDIR)
3522 return -EINVAL;
3523
3524 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3525 un->filename = getname(fname);
3526 if (IS_ERR(un->filename))
3527 return PTR_ERR(un->filename);
3528
3529 req->flags |= REQ_F_NEED_CLEANUP;
3530 return 0;
3531}
3532
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003533static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003534{
3535 struct io_unlink *un = &req->unlink;
3536 int ret;
3537
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003538 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003539 return -EAGAIN;
3540
3541 if (un->flags & AT_REMOVEDIR)
3542 ret = do_rmdir(un->dfd, un->filename);
3543 else
3544 ret = do_unlinkat(un->dfd, un->filename);
3545
3546 req->flags &= ~REQ_F_NEED_CLEANUP;
3547 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003548 req_set_fail(req);
Jens Axboe14a11432020-09-28 14:27:37 -06003549 io_req_complete(req, ret);
3550 return 0;
3551}
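/*
 * Likewise for IORING_OP_UNLINKAT: sqe->fd carries the dirfd, sqe->addr the
 * pathname, and sqe->unlink_flags may carry AT_REMOVEDIR (any other flag is
 * rejected with -EINVAL). A minimal sketch, again assuming liburing's
 * io_uring_prep_unlinkat() helper:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_unlinkat(sqe, AT_FDCWD, "scratch.tmp", 0);
 *	io_uring_submit(&ring);
 */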
3552
Jens Axboe36f4fa62020-09-05 11:14:22 -06003553static int io_shutdown_prep(struct io_kiocb *req,
3554 const struct io_uring_sqe *sqe)
3555{
3556#if defined(CONFIG_NET)
3557 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3558 return -EINVAL;
3559 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3560 sqe->buf_index)
3561 return -EINVAL;
3562
3563 req->shutdown.how = READ_ONCE(sqe->len);
3564 return 0;
3565#else
3566 return -EOPNOTSUPP;
3567#endif
3568}
3569
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003570static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003571{
3572#if defined(CONFIG_NET)
3573 struct socket *sock;
3574 int ret;
3575
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003576 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003577 return -EAGAIN;
3578
Linus Torvalds48aba792020-12-16 12:44:05 -08003579 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003580 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003581 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003582
3583 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003584 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003585 req_set_fail(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003586 io_req_complete(req, ret);
3587 return 0;
3588#else
3589 return -EOPNOTSUPP;
3590#endif
3591}
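/*
 * IORING_OP_SHUTDOWN only consumes sqe->fd (the socket) and sqe->len (the
 * 'how' argument); all other fields must be zero. An illustrative sketch,
 * assuming liburing's io_uring_prep_shutdown():
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 */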
3592
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003593static int __io_splice_prep(struct io_kiocb *req,
3594 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003595{
3596	struct io_splice *sp = &req->splice;
3597 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003598
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003599 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3600 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003601
3602 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003603 sp->len = READ_ONCE(sqe->len);
3604 sp->flags = READ_ONCE(sqe->splice_flags);
3605
3606 if (unlikely(sp->flags & ~valid_flags))
3607 return -EINVAL;
3608
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003609 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3610 (sp->flags & SPLICE_F_FD_IN_FIXED));
3611 if (!sp->file_in)
3612 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003613 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003614 return 0;
3615}
3616
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003617static int io_tee_prep(struct io_kiocb *req,
3618 const struct io_uring_sqe *sqe)
3619{
3620 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3621 return -EINVAL;
3622 return __io_splice_prep(req, sqe);
3623}
3624
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003625static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003626{
3627 struct io_splice *sp = &req->splice;
3628 struct file *in = sp->file_in;
3629 struct file *out = sp->file_out;
3630 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3631 long ret = 0;
3632
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003633 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003634 return -EAGAIN;
3635 if (sp->len)
3636 ret = do_tee(in, out, sp->len, flags);
3637
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003638 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3639 io_put_file(in);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003640 req->flags &= ~REQ_F_NEED_CLEANUP;
3641
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003642 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003643 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003644 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003645 return 0;
3646}
3647
3648static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3649{
3650	struct io_splice *sp = &req->splice;
3651
3652 sp->off_in = READ_ONCE(sqe->splice_off_in);
3653 sp->off_out = READ_ONCE(sqe->off);
3654 return __io_splice_prep(req, sqe);
3655}
3656
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003657static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003658{
3659 struct io_splice *sp = &req->splice;
3660 struct file *in = sp->file_in;
3661 struct file *out = sp->file_out;
3662 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3663 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003664 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003665
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003666 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003667 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003668
3669 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3670 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003671
Jens Axboe948a7742020-05-17 14:21:38 -06003672 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003673 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003674
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003675 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3676 io_put_file(in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003677 req->flags &= ~REQ_F_NEED_CLEANUP;
3678
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003679 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003680 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003681 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003682 return 0;
3683}
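/*
 * For IORING_OP_SPLICE the input fd comes from sqe->splice_fd_in (optionally
 * a fixed file when SPLICE_F_FD_IN_FIXED is set), the offsets from
 * sqe->splice_off_in and sqe->off (-1 meaning "use the file position"), the
 * byte count from sqe->len and the flags from sqe->splice_flags. A sketch
 * that moves data from a regular file into a pipe, assuming liburing's
 * io_uring_prep_splice():
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_splice(sqe, file_fd, 0, pipe_wr_fd, -1, 4096, 0);
 *	io_uring_submit(&ring);
 */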
3684
Jens Axboe2b188cc2019-01-07 10:46:33 -07003685/*
3686 * IORING_OP_NOP just posts a completion event, nothing else.
3687 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003688static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003689{
3690 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003691
Jens Axboedef596e2019-01-09 08:59:42 -07003692 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3693 return -EINVAL;
3694
Pavel Begunkov889fca72021-02-10 00:03:09 +00003695 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003696 return 0;
3697}
3698
Pavel Begunkov1155c762021-02-18 18:29:38 +00003699static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003700{
Jens Axboe6b063142019-01-10 22:13:58 -07003701 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003702
Jens Axboe09bb8392019-03-13 12:39:28 -06003703 if (!req->file)
3704 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003705
Jens Axboe6b063142019-01-10 22:13:58 -07003706 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003707 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003708 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003709 return -EINVAL;
3710
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003711 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3712 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3713 return -EINVAL;
3714
3715 req->sync.off = READ_ONCE(sqe->off);
3716 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003717 return 0;
3718}
3719
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003720static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003721{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003722 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003723 int ret;
3724
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003725 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003726 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003727 return -EAGAIN;
3728
Jens Axboe9adbd452019-12-20 08:45:55 -07003729 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003730 end > 0 ? end : LLONG_MAX,
3731 req->sync.flags & IORING_FSYNC_DATASYNC);
3732 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003733 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003734 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003735 return 0;
3736}
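/*
 * io_fsync() above is a thin wrapper around vfs_fsync_range(): sqe->off and
 * sqe->len bound the range (leaving both at zero syncs the whole file) and
 * IORING_FSYNC_DATASYNC in sqe->fsync_flags selects fdatasync semantics.
 * Illustrative liburing usage:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */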
3737
Jens Axboed63d1b52019-12-10 10:38:56 -07003738static int io_fallocate_prep(struct io_kiocb *req,
3739 const struct io_uring_sqe *sqe)
3740{
3741 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3742 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003743 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3744 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003745
3746 req->sync.off = READ_ONCE(sqe->off);
3747 req->sync.len = READ_ONCE(sqe->addr);
3748 req->sync.mode = READ_ONCE(sqe->len);
3749 return 0;
3750}
3751
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003752static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003753{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003754 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003755
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003756	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003757 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003758 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003759 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3760 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003761 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003762 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003763 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003764 return 0;
3765}
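/*
 * IORING_OP_FALLOCATE reads the mode from sqe->len, the offset from sqe->off
 * and the length from sqe->addr, then calls vfs_fallocate() from a blocking
 * context. A sketch preallocating 1 MiB at the start of a file, assuming
 * liburing's io_uring_prep_fallocate():
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1 << 20);
 *	io_uring_submit(&ring);
 */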
3766
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003767static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003768{
Jens Axboef8748882020-01-08 17:47:02 -07003769 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003770 int ret;
3771
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003772 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003773 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003774 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003775 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003776
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003777	/* open.how should already be initialised */
3778 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003779 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003780
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003781 req->open.dfd = READ_ONCE(sqe->fd);
3782 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003783 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003784 if (IS_ERR(req->open.filename)) {
3785 ret = PTR_ERR(req->open.filename);
3786 req->open.filename = NULL;
3787 return ret;
3788 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003789 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003790 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003791 return 0;
3792}
3793
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003794static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3795{
3796 u64 flags, mode;
3797
Jens Axboe14587a462020-09-05 11:36:08 -06003798 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003799 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003800 mode = READ_ONCE(sqe->len);
3801 flags = READ_ONCE(sqe->open_flags);
3802 req->open.how = build_open_how(flags, mode);
3803 return __io_openat_prep(req, sqe);
3804}
3805
Jens Axboecebdb982020-01-08 17:59:24 -07003806static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3807{
3808 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003809 size_t len;
3810 int ret;
3811
Jens Axboe14587a462020-09-05 11:36:08 -06003812 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003813 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003814 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3815 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003816 if (len < OPEN_HOW_SIZE_VER0)
3817 return -EINVAL;
3818
3819 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3820 len);
3821 if (ret)
3822 return ret;
3823
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003824 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003825}
3826
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003827static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003828{
3829 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003830 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003831 bool nonblock_set;
3832 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003833 int ret;
3834
Jens Axboecebdb982020-01-08 17:59:24 -07003835 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003836 if (ret)
3837 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003838 nonblock_set = op.open_flag & O_NONBLOCK;
3839 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003840 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003841 /*
3842 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3843	 * it'll always return -EAGAIN
3844 */
3845 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3846 return -EAGAIN;
3847 op.lookup_flags |= LOOKUP_CACHED;
3848 op.open_flag |= O_NONBLOCK;
3849 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003850
Jens Axboe4022e7a2020-03-19 19:23:18 -06003851 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003852 if (ret < 0)
3853 goto err;
3854
3855 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Jens Axboe3a81fd02020-12-10 12:25:36 -07003856 /* only retry if RESOLVE_CACHED wasn't already set by application */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003857 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
3858 file == ERR_PTR(-EAGAIN)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003859 /*
3860	 * We could hang on to this 'fd', but it seems like marginal
3861 * gain for something that is now known to be a slower path.
3862 * So just put it, and we'll get a new one when we retry.
3863 */
3864 put_unused_fd(ret);
3865 return -EAGAIN;
3866 }
3867
Jens Axboe15b71ab2019-12-11 11:20:36 -07003868 if (IS_ERR(file)) {
3869 put_unused_fd(ret);
3870 ret = PTR_ERR(file);
3871 } else {
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003872 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
Jens Axboe3a81fd02020-12-10 12:25:36 -07003873 file->f_flags &= ~O_NONBLOCK;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003874 fsnotify_open(file);
3875 fd_install(ret, file);
3876 }
3877err:
3878 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003879 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003880 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003881 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01003882 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003883 return 0;
3884}
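/*
 * The nonblocking open fast path above leans on LOOKUP_CACHED: the lookup is
 * attempted purely from the dcache and, if that misses, the request is
 * punted to io-wq, unless the application itself asked for RESOLVE_CACHED,
 * in which case -EAGAIN is reported in the CQE instead. A hedged userspace
 * sketch, assuming liburing's io_uring_prep_openat2():
 *
 *	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.bin", &how);
 *	io_uring_submit(&ring);
 */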
3885
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003886static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003887{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003888 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003889}
3890
Jens Axboe067524e2020-03-02 16:32:28 -07003891static int io_remove_buffers_prep(struct io_kiocb *req,
3892 const struct io_uring_sqe *sqe)
3893{
3894 struct io_provide_buf *p = &req->pbuf;
3895 u64 tmp;
3896
3897 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3898 return -EINVAL;
3899
3900 tmp = READ_ONCE(sqe->fd);
3901 if (!tmp || tmp > USHRT_MAX)
3902 return -EINVAL;
3903
3904 memset(p, 0, sizeof(*p));
3905 p->nbufs = tmp;
3906 p->bgid = READ_ONCE(sqe->buf_group);
3907 return 0;
3908}
3909
3910static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3911 int bgid, unsigned nbufs)
3912{
3913 unsigned i = 0;
3914
3915 /* shouldn't happen */
3916 if (!nbufs)
3917 return 0;
3918
3919 /* the head kbuf is the list itself */
3920 while (!list_empty(&buf->list)) {
3921 struct io_buffer *nxt;
3922
3923 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3924 list_del(&nxt->list);
3925 kfree(nxt);
3926 if (++i == nbufs)
3927 return i;
3928 }
3929 i++;
3930 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003931 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003932
3933 return i;
3934}
3935
Pavel Begunkov889fca72021-02-10 00:03:09 +00003936static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003937{
3938 struct io_provide_buf *p = &req->pbuf;
3939 struct io_ring_ctx *ctx = req->ctx;
3940 struct io_buffer *head;
3941 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003942 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003943
3944 io_ring_submit_lock(ctx, !force_nonblock);
3945
3946 lockdep_assert_held(&ctx->uring_lock);
3947
3948 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003949 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003950 if (head)
3951 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003952 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003953 req_set_fail(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003954
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00003955 /* complete before unlock, IOPOLL may need the lock */
3956 __io_req_complete(req, issue_flags, ret, 0);
3957 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07003958 return 0;
3959}
3960
Jens Axboeddf0322d2020-02-23 16:41:33 -07003961static int io_provide_buffers_prep(struct io_kiocb *req,
3962 const struct io_uring_sqe *sqe)
3963{
Pavel Begunkov38134ad2021-04-15 13:07:39 +01003964 unsigned long size, tmp_check;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003965 struct io_provide_buf *p = &req->pbuf;
3966 u64 tmp;
3967
3968 if (sqe->ioprio || sqe->rw_flags)
3969 return -EINVAL;
3970
3971 tmp = READ_ONCE(sqe->fd);
3972 if (!tmp || tmp > USHRT_MAX)
3973 return -E2BIG;
3974 p->nbufs = tmp;
3975 p->addr = READ_ONCE(sqe->addr);
3976 p->len = READ_ONCE(sqe->len);
3977
Pavel Begunkov38134ad2021-04-15 13:07:39 +01003978 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
3979 &size))
3980 return -EOVERFLOW;
3981 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
3982 return -EOVERFLOW;
3983
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003984 size = (unsigned long)p->len * p->nbufs;
3985 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003986 return -EFAULT;
3987
3988 p->bgid = READ_ONCE(sqe->buf_group);
3989 tmp = READ_ONCE(sqe->off);
3990 if (tmp > USHRT_MAX)
3991 return -E2BIG;
3992 p->bid = tmp;
3993 return 0;
3994}
3995
3996static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3997{
3998 struct io_buffer *buf;
3999 u64 addr = pbuf->addr;
4000 int i, bid = pbuf->bid;
4001
4002 for (i = 0; i < pbuf->nbufs; i++) {
4003 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4004 if (!buf)
4005 break;
4006
4007 buf->addr = addr;
Thadeu Lima de Souza Cascardod1f82802021-05-05 09:47:06 -03004008 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004009 buf->bid = bid;
4010 addr += pbuf->len;
4011 bid++;
4012 if (!*head) {
4013 INIT_LIST_HEAD(&buf->list);
4014 *head = buf;
4015 } else {
4016 list_add_tail(&buf->list, &(*head)->list);
4017 }
4018 }
4019
4020 return i ? i : -ENOMEM;
4021}
4022
Pavel Begunkov889fca72021-02-10 00:03:09 +00004023static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004024{
4025 struct io_provide_buf *p = &req->pbuf;
4026 struct io_ring_ctx *ctx = req->ctx;
4027 struct io_buffer *head, *list;
4028 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004029 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004030
4031 io_ring_submit_lock(ctx, !force_nonblock);
4032
4033 lockdep_assert_held(&ctx->uring_lock);
4034
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004035 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004036
4037 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004038 if (ret >= 0 && !list) {
4039 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4040 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004041 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004042 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004043 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004044 req_set_fail(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004045 /* complete before unlock, IOPOLL may need the lock */
4046 __io_req_complete(req, issue_flags, ret, 0);
4047 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004048 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004049}
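/*
 * Provided buffers in short: IORING_OP_PROVIDE_BUFFERS registers sqe->fd
 * buffers of sqe->len bytes each, starting at sqe->addr, under buffer group
 * sqe->buf_group, with buffer IDs starting at sqe->off. A later request that
 * sets IOSQE_BUFFER_SELECT picks one of those buffers at issue time and
 * reports the chosen ID via the CQE flags. A hedged sketch, assuming
 * liburing's io_uring_prep_provide_buffers() and io_uring_prep_recv():
 *
 *	char *pool = malloc(8 * 4096);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, 7, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->buf_group = 7;
 *	io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
 *	io_uring_submit(&ring);
 */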
4050
Jens Axboe3e4827b2020-01-08 15:18:09 -07004051static int io_epoll_ctl_prep(struct io_kiocb *req,
4052 const struct io_uring_sqe *sqe)
4053{
4054#if defined(CONFIG_EPOLL)
4055 if (sqe->ioprio || sqe->buf_index)
4056 return -EINVAL;
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004057 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004058 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004059
4060 req->epoll.epfd = READ_ONCE(sqe->fd);
4061 req->epoll.op = READ_ONCE(sqe->len);
4062 req->epoll.fd = READ_ONCE(sqe->off);
4063
4064 if (ep_op_has_event(req->epoll.op)) {
4065 struct epoll_event __user *ev;
4066
4067 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4068 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4069 return -EFAULT;
4070 }
4071
4072 return 0;
4073#else
4074 return -EOPNOTSUPP;
4075#endif
4076}
4077
Pavel Begunkov889fca72021-02-10 00:03:09 +00004078static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004079{
4080#if defined(CONFIG_EPOLL)
4081 struct io_epoll *ie = &req->epoll;
4082 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004083 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004084
4085 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4086 if (force_nonblock && ret == -EAGAIN)
4087 return -EAGAIN;
4088
4089 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004090 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004091 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004092 return 0;
4093#else
4094 return -EOPNOTSUPP;
4095#endif
4096}
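/*
 * IORING_OP_EPOLL_CTL mirrors epoll_ctl(2): sqe->fd is the epoll instance,
 * sqe->len the op, sqe->off the target fd and sqe->addr the user
 * epoll_event. Illustrative sketch, assuming liburing's
 * io_uring_prep_epoll_ctl():
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_epoll_ctl(sqe, epfd, sockfd, EPOLL_CTL_ADD, &ev);
 *	io_uring_submit(&ring);
 */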
4097
Jens Axboec1ca7572019-12-25 22:18:28 -07004098static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4099{
4100#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4101 if (sqe->ioprio || sqe->buf_index || sqe->off)
4102 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004103 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4104 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004105
4106 req->madvise.addr = READ_ONCE(sqe->addr);
4107 req->madvise.len = READ_ONCE(sqe->len);
4108 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4109 return 0;
4110#else
4111 return -EOPNOTSUPP;
4112#endif
4113}
4114
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004115static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004116{
4117#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4118 struct io_madvise *ma = &req->madvise;
4119 int ret;
4120
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004121 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004122 return -EAGAIN;
4123
Minchan Kim0726b012020-10-17 16:14:50 -07004124 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004125 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004126 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004127 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004128 return 0;
4129#else
4130 return -EOPNOTSUPP;
4131#endif
4132}
4133
Jens Axboe4840e412019-12-25 22:03:45 -07004134static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4135{
4136 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4137 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004138 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4139 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004140
4141 req->fadvise.offset = READ_ONCE(sqe->off);
4142 req->fadvise.len = READ_ONCE(sqe->len);
4143 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4144 return 0;
4145}
4146
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004147static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004148{
4149 struct io_fadvise *fa = &req->fadvise;
4150 int ret;
4151
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004152 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004153 switch (fa->advice) {
4154 case POSIX_FADV_NORMAL:
4155 case POSIX_FADV_RANDOM:
4156 case POSIX_FADV_SEQUENTIAL:
4157 break;
4158 default:
4159 return -EAGAIN;
4160 }
4161 }
Jens Axboe4840e412019-12-25 22:03:45 -07004162
4163 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4164 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004165 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004166 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004167 return 0;
4168}
4169
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004170static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4171{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004172 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004173 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004174 if (sqe->ioprio || sqe->buf_index)
4175 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004176 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004177 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004178
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004179 req->statx.dfd = READ_ONCE(sqe->fd);
4180 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004181 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004182 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4183 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004184
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004185 return 0;
4186}
4187
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004188static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004189{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004190 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004191 int ret;
4192
Pavel Begunkov59d70012021-03-22 01:58:30 +00004193 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004194 return -EAGAIN;
4195
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004196 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4197 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004198
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004199 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004200 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004201 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004202 return 0;
4203}
4204
Jens Axboeb5dba592019-12-11 14:02:38 -07004205static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4206{
Jens Axboe14587a462020-09-05 11:36:08 -06004207 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004208 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004209 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4210 sqe->rw_flags || sqe->buf_index)
4211 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004212 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004213 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004214
4215 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004216 return 0;
4217}
4218
Pavel Begunkov889fca72021-02-10 00:03:09 +00004219static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004220{
Jens Axboe9eac1902021-01-19 15:50:37 -07004221 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004222 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004223 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004224 struct file *file = NULL;
4225 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004226
Jens Axboe9eac1902021-01-19 15:50:37 -07004227 spin_lock(&files->file_lock);
4228 fdt = files_fdtable(files);
4229 if (close->fd >= fdt->max_fds) {
4230 spin_unlock(&files->file_lock);
4231 goto err;
4232 }
4233 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004234 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004235 spin_unlock(&files->file_lock);
4236 file = NULL;
4237 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004238 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004239
4240 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004241 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004242 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004243 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004244 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004245
Jens Axboe9eac1902021-01-19 15:50:37 -07004246 ret = __close_fd_get_file(close->fd, &file);
4247 spin_unlock(&files->file_lock);
4248 if (ret < 0) {
4249 if (ret == -ENOENT)
4250 ret = -EBADF;
4251 goto err;
4252 }
4253
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004254 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004255 ret = filp_close(file, current->files);
4256err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004257 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004258 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004259 if (file)
4260 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004261 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004262 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004263}
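/*
 * io_close() above special-cases two things: the io_uring fd itself may not
 * be closed this way, and files that define ->flush() are punted to io-wq so
 * the potentially blocking flush never runs from the submission path. The
 * common case just detaches the fd and calls filp_close(). Userspace sketch:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_close(sqe, fd);
 *	io_uring_submit(&ring);
 */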
4264
Pavel Begunkov1155c762021-02-18 18:29:38 +00004265static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004266{
4267 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004268
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004269 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4270 return -EINVAL;
4271 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4272 return -EINVAL;
4273
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004274 req->sync.off = READ_ONCE(sqe->off);
4275 req->sync.len = READ_ONCE(sqe->len);
4276 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004277 return 0;
4278}
4279
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004280static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004281{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004282 int ret;
4283
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004284 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004285 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004286 return -EAGAIN;
4287
Jens Axboe9adbd452019-12-20 08:45:55 -07004288 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004289 req->sync.flags);
4290 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004291 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004292 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004293 return 0;
4294}
4295
YueHaibing469956e2020-03-04 15:53:52 +08004296#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004297static int io_setup_async_msg(struct io_kiocb *req,
4298 struct io_async_msghdr *kmsg)
4299{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004300 struct io_async_msghdr *async_msg = req->async_data;
4301
4302 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004303 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004304 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004305 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004306 return -ENOMEM;
4307 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004308 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004309 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004310 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004311 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004312	/* if we were using fast_iov, set it to the new one */
4313 if (!async_msg->free_iov)
4314 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4315
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004316 return -EAGAIN;
4317}
4318
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004319static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4320 struct io_async_msghdr *iomsg)
4321{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004322 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004323 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004324 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004325 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004326}
4327
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004328static int io_sendmsg_prep_async(struct io_kiocb *req)
4329{
4330 int ret;
4331
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004332 ret = io_sendmsg_copy_hdr(req, req->async_data);
4333 if (!ret)
4334 req->flags |= REQ_F_NEED_CLEANUP;
4335 return ret;
4336}
4337
Jens Axboe3529d8c2019-12-19 18:24:38 -07004338static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004339{
Jens Axboee47293f2019-12-20 08:58:21 -07004340 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004341
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004342 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4343 return -EINVAL;
4344
Pavel Begunkov270a5942020-07-12 20:41:04 +03004345 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004346 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004347 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4348 if (sr->msg_flags & MSG_DONTWAIT)
4349 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004350
Jens Axboed8768362020-02-27 14:17:49 -07004351#ifdef CONFIG_COMPAT
4352 if (req->ctx->compat)
4353 sr->msg_flags |= MSG_CMSG_COMPAT;
4354#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004355 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004356}
4357
Pavel Begunkov889fca72021-02-10 00:03:09 +00004358static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004359{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004360 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004361 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004362 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004363 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004364 int ret;
4365
Florent Revestdba4a922020-12-04 12:36:04 +01004366 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004367 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004368 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004369
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004370 kmsg = req->async_data;
4371 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004372 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004373 if (ret)
4374 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004375 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004376 }
4377
Pavel Begunkov04411802021-04-01 15:44:00 +01004378 flags = req->sr_msg.msg_flags;
4379 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004380 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004381 if (flags & MSG_WAITALL)
4382 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4383
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004384 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004385 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004386 return io_setup_async_msg(req, kmsg);
4387 if (ret == -ERESTARTSYS)
4388 ret = -EINTR;
4389
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004390 /* fast path, check for non-NULL to avoid function call */
4391 if (kmsg->free_iov)
4392 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004393 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004394 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004395 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004396 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004397 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004398}
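/*
 * io_sendmsg() above copies the parsed msghdr into the request's async data
 * when it has to be retried, so a punted request does not re-read the user
 * msghdr. MSG_DONTWAIT is added for nonblocking issue, and with MSG_WAITALL
 * a short send marks the request as failed. Illustrative liburing usage:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 *
 * As with sendmsg(2), the msghdr and iovec must stay valid while the request
 * is in flight.
 */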
4399
Pavel Begunkov889fca72021-02-10 00:03:09 +00004400static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004401{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004402 struct io_sr_msg *sr = &req->sr_msg;
4403 struct msghdr msg;
4404 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004405 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004406 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004407 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004408 int ret;
4409
Florent Revestdba4a922020-12-04 12:36:04 +01004410 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004411 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004412 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004413
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004414 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4415 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004416 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004417
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004418 msg.msg_name = NULL;
4419 msg.msg_control = NULL;
4420 msg.msg_controllen = 0;
4421 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004422
Pavel Begunkov04411802021-04-01 15:44:00 +01004423 flags = req->sr_msg.msg_flags;
4424 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004425 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004426 if (flags & MSG_WAITALL)
4427 min_ret = iov_iter_count(&msg.msg_iter);
4428
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004429 msg.msg_flags = flags;
4430 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004431 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004432 return -EAGAIN;
4433 if (ret == -ERESTARTSYS)
4434 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004435
Stefan Metzmacher00312752021-03-20 20:33:36 +01004436 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004437 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004438 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004439 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004440}
4441
Pavel Begunkov1400e692020-07-12 20:41:05 +03004442static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4443 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004444{
4445 struct io_sr_msg *sr = &req->sr_msg;
4446 struct iovec __user *uiov;
4447 size_t iov_len;
4448 int ret;
4449
Pavel Begunkov1400e692020-07-12 20:41:05 +03004450 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4451 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004452 if (ret)
4453 return ret;
4454
4455 if (req->flags & REQ_F_BUFFER_SELECT) {
4456 if (iov_len > 1)
4457 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004458 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004459 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004460 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004461 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004462 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004463 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004464 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004465 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004466 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004467 if (ret > 0)
4468 ret = 0;
4469 }
4470
4471 return ret;
4472}
4473
4474#ifdef CONFIG_COMPAT
4475static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004476 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004477{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004478 struct io_sr_msg *sr = &req->sr_msg;
4479 struct compat_iovec __user *uiov;
4480 compat_uptr_t ptr;
4481 compat_size_t len;
4482 int ret;
4483
Pavel Begunkov4af34172021-04-11 01:46:30 +01004484 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4485 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004486 if (ret)
4487 return ret;
4488
4489 uiov = compat_ptr(ptr);
4490 if (req->flags & REQ_F_BUFFER_SELECT) {
4491 compat_ssize_t clen;
4492
4493 if (len > 1)
4494 return -EINVAL;
4495 if (!access_ok(uiov, sizeof(*uiov)))
4496 return -EFAULT;
4497 if (__get_user(clen, &uiov->iov_len))
4498 return -EFAULT;
4499 if (clen < 0)
4500 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004501 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004502 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004503 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004504 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004505 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004506 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004507 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004508 if (ret < 0)
4509 return ret;
4510 }
4511
4512 return 0;
4513}
Jens Axboe03b12302019-12-02 18:50:25 -07004514#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004515
Pavel Begunkov1400e692020-07-12 20:41:05 +03004516static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4517 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004518{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004519 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004520
4521#ifdef CONFIG_COMPAT
4522 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004523 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004524#endif
4525
Pavel Begunkov1400e692020-07-12 20:41:05 +03004526 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004527}
4528
Jens Axboebcda7ba2020-02-23 16:42:51 -07004529static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004530 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004531{
4532 struct io_sr_msg *sr = &req->sr_msg;
4533 struct io_buffer *kbuf;
4534
Jens Axboebcda7ba2020-02-23 16:42:51 -07004535 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4536 if (IS_ERR(kbuf))
4537 return kbuf;
4538
4539 sr->kbuf = kbuf;
4540 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004541 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004542}
4543
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004544static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4545{
4546 return io_put_kbuf(req, req->sr_msg.kbuf);
4547}
4548
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004549static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004550{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004551 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004552
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004553 ret = io_recvmsg_copy_hdr(req, req->async_data);
4554 if (!ret)
4555 req->flags |= REQ_F_NEED_CLEANUP;
4556 return ret;
4557}
4558
4559static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4560{
4561 struct io_sr_msg *sr = &req->sr_msg;
4562
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004563 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4564 return -EINVAL;
4565
Pavel Begunkov270a5942020-07-12 20:41:04 +03004566 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004567 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004568 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01004569 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4570 if (sr->msg_flags & MSG_DONTWAIT)
4571 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004572
Jens Axboed8768362020-02-27 14:17:49 -07004573#ifdef CONFIG_COMPAT
4574 if (req->ctx->compat)
4575 sr->msg_flags |= MSG_CMSG_COMPAT;
4576#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004577 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004578}
4579
Pavel Begunkov889fca72021-02-10 00:03:09 +00004580static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004581{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004582 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004583 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004584 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004585 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004586 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004587 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004588 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004589
Florent Revestdba4a922020-12-04 12:36:04 +01004590 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004591 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004592 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004593
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004594 kmsg = req->async_data;
4595 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004596 ret = io_recvmsg_copy_hdr(req, &iomsg);
4597 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004598 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004599 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004600 }
4601
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004602 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004603 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004604 if (IS_ERR(kbuf))
4605 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004606 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004607 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4608 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004609 1, req->sr_msg.len);
4610 }
4611
Pavel Begunkov04411802021-04-01 15:44:00 +01004612 flags = req->sr_msg.msg_flags;
4613 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004614 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004615 if (flags & MSG_WAITALL)
4616 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4617
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004618 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4619 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004620 if (force_nonblock && ret == -EAGAIN)
4621 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004622 if (ret == -ERESTARTSYS)
4623 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004624
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004625 if (req->flags & REQ_F_BUFFER_SELECTED)
4626 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004627 /* fast path, check for non-NULL to avoid function call */
4628 if (kmsg->free_iov)
4629 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004630 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004631 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004632 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004633 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004634 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004635}
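/*
 * io_recvmsg() above also understands IOSQE_BUFFER_SELECT: the data buffer
 * is then chosen from a provided-buffer group at issue time and its ID is
 * handed back through the CQE flags, so the application need not dedicate a
 * buffer to every in-flight receive. A plain (non buffer-select) sketch:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recvmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 */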
4636
Pavel Begunkov889fca72021-02-10 00:03:09 +00004637static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004638{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004639 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004640 struct io_sr_msg *sr = &req->sr_msg;
4641 struct msghdr msg;
4642 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004643 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004644 struct iovec iov;
4645 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004646 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004647 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004648 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004649
Florent Revestdba4a922020-12-04 12:36:04 +01004650 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004651 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004652 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004653
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004654 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004655 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004656 if (IS_ERR(kbuf))
4657 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004658 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004659 }
4660
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004661 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004662 if (unlikely(ret))
4663 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004664
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004665 msg.msg_name = NULL;
4666 msg.msg_control = NULL;
4667 msg.msg_controllen = 0;
4668 msg.msg_namelen = 0;
4669 msg.msg_iocb = NULL;
4670 msg.msg_flags = 0;
4671
Pavel Begunkov04411802021-04-01 15:44:00 +01004672 flags = req->sr_msg.msg_flags;
4673 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004674 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004675 if (flags & MSG_WAITALL)
4676 min_ret = iov_iter_count(&msg.msg_iter);
4677
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004678 ret = sock_recvmsg(sock, &msg, flags);
4679 if (force_nonblock && ret == -EAGAIN)
4680 return -EAGAIN;
4681 if (ret == -ERESTARTSYS)
4682 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004683out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004684 if (req->flags & REQ_F_BUFFER_SELECTED)
4685 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01004686 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004687 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004688 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004689 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004690}
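/*
 * Illustrative userspace sketch, not part of this file: IORING_OP_RECV with
 * kernel-selected buffers, matching the REQ_F_BUFFER_SELECT path in io_recv()
 * above. Requires <liburing.h>; the buffer group id (7) and buffer sizes are
 * arbitrary example values, and the ring is assumed to be initialised.
 */
static int example_recv_bufselect(struct io_uring *ring, int sockfd,
				  char bufs[8][4096])
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* hand eight 4KB buffers to the kernel under buffer group 7 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 7, 0);
	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	io_uring_cqe_seen(ring, cqe);

	/* recv with no buffer of our own: the kernel picks one from group 7 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;
	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	/* cqe->flags >> IORING_CQE_BUFFER_SHIFT identifies the chosen buffer */
	ret = cqe->res;			/* bytes received, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}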
4691
Jens Axboe3529d8c2019-12-19 18:24:38 -07004692static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004693{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004694 struct io_accept *accept = &req->accept;
4695
Jens Axboe14587a462020-09-05 11:36:08 -06004696 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004697 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004698 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004699 return -EINVAL;
4700
Jens Axboed55e5f52019-12-11 16:12:15 -07004701 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4702 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004703 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004704 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004705 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004706}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004707
Pavel Begunkov889fca72021-02-10 00:03:09 +00004708static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004709{
4710 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004711 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004712 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004713 int ret;
4714
Jiufei Xuee697dee2020-06-10 13:41:59 +08004715 if (req->file->f_flags & O_NONBLOCK)
4716 req->flags |= REQ_F_NOWAIT;
4717
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004718 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004719 accept->addr_len, accept->flags,
4720 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004721 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004722 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004723 if (ret < 0) {
4724 if (ret == -ERESTARTSYS)
4725 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004726 req_set_fail(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004727 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004728 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004729 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004730}
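/*
 * Illustrative userspace sketch, not part of this file: queueing an
 * IORING_OP_ACCEPT, the opcode served by io_accept() above. Requires
 * <liburing.h> and <sys/socket.h>; 'listen_fd' is a listening socket
 * provided by the caller.
 */
static int example_accept(struct io_uring *ring, int listen_fd)
{
	struct sockaddr_storage addr;
	socklen_t addrlen = sizeof(addr);
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int newfd;

	if (!sqe)
		return -EBUSY;
	/* accept_flags are passed through to accept4(), e.g. SOCK_CLOEXEC */
	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&addr,
			     &addrlen, SOCK_CLOEXEC);
	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	newfd = cqe->res;		/* new connection fd, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return newfd;
}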
4731
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004732static int io_connect_prep_async(struct io_kiocb *req)
4733{
4734 struct io_async_connect *io = req->async_data;
4735 struct io_connect *conn = &req->connect;
4736
4737 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4738}
4739
Jens Axboe3529d8c2019-12-19 18:24:38 -07004740static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004741{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004742 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004743
Jens Axboe14587a462020-09-05 11:36:08 -06004744 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004745 return -EINVAL;
4746 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4747 return -EINVAL;
4748
Jens Axboe3529d8c2019-12-19 18:24:38 -07004749 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4750 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004751 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004752}
4753
Pavel Begunkov889fca72021-02-10 00:03:09 +00004754static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004755{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004756 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004757 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004758 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004759 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004760
Jens Axboee8c2bc12020-08-15 18:44:09 -07004761 if (req->async_data) {
4762 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004763 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004764 ret = move_addr_to_kernel(req->connect.addr,
4765 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004766 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004767 if (ret)
4768 goto out;
4769 io = &__io;
4770 }
4771
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004772 file_flags = force_nonblock ? O_NONBLOCK : 0;
4773
Jens Axboee8c2bc12020-08-15 18:44:09 -07004774 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004775 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004776 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004777 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004778 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004779 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004780 ret = -ENOMEM;
4781 goto out;
4782 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004783 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004784 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004785 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004786 if (ret == -ERESTARTSYS)
4787 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004788out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004789 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004790 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004791 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004792 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004793}
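/*
 * Illustrative userspace sketch, not part of this file: IORING_OP_CONNECT as
 * handled by io_connect() above. Requires <liburing.h>, <netinet/in.h> and
 * <arpa/inet.h>; the loopback address and port 8080 are arbitrary example
 * values.
 */
static int example_connect(struct io_uring *ring, int sockfd)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst,
			      sizeof(dst));
	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	ret = cqe->res;			/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}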
YueHaibing469956e2020-03-04 15:53:52 +08004794#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004795#define IO_NETOP_FN(op) \
4796static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4797{ \
4798 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004799}
4800
Jens Axboe99a10082021-02-19 09:35:19 -07004801#define IO_NETOP_PREP(op) \
4802IO_NETOP_FN(op) \
4803static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4804{ \
4805 return -EOPNOTSUPP; \
4806} \
4807
4808#define IO_NETOP_PREP_ASYNC(op) \
4809IO_NETOP_PREP(op) \
4810static int io_##op##_prep_async(struct io_kiocb *req) \
4811{ \
4812 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004813}
4814
Jens Axboe99a10082021-02-19 09:35:19 -07004815IO_NETOP_PREP_ASYNC(sendmsg);
4816IO_NETOP_PREP_ASYNC(recvmsg);
4817IO_NETOP_PREP_ASYNC(connect);
4818IO_NETOP_PREP(accept);
4819IO_NETOP_FN(send);
4820IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004821#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004822
Jens Axboed7718a92020-02-14 22:23:12 -07004823struct io_poll_table {
4824 struct poll_table_struct pt;
4825 struct io_kiocb *req;
4826 int error;
4827};
4828
Jens Axboed7718a92020-02-14 22:23:12 -07004829static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4830 __poll_t mask, task_work_func_t func)
4831{
Jens Axboeaa96bf82020-04-03 11:26:26 -06004832 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004833
4834 /* for instances that support it check for an event match first: */
4835 if (mask && !(mask & poll->events))
4836 return 0;
4837
4838 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4839
4840 list_del_init(&poll->wait.entry);
4841
Jens Axboed7718a92020-02-14 22:23:12 -07004842 req->result = mask;
Jens Axboe7cbf1722021-02-10 00:03:20 +00004843 req->task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004844
Jens Axboed7718a92020-02-14 22:23:12 -07004845 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004846 * If this fails, then the task is exiting. When a task exits, the
4847 * work gets canceled, so just cancel this request as well instead
4848 * of executing it. We can't safely execute it anyway, as we may not
4849 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07004850 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06004851 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004852 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004853 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00004854 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004855 }
Jens Axboed7718a92020-02-14 22:23:12 -07004856 return 1;
4857}
4858
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004859static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4860 __acquires(&req->ctx->completion_lock)
4861{
4862 struct io_ring_ctx *ctx = req->ctx;
4863
4864 if (!req->result && !READ_ONCE(poll->canceled)) {
4865 struct poll_table_struct pt = { ._key = poll->events };
4866
4867 req->result = vfs_poll(req->file, &pt) & poll->events;
4868 }
4869
4870 spin_lock_irq(&ctx->completion_lock);
4871 if (!req->result && !READ_ONCE(poll->canceled)) {
4872 add_wait_queue(poll->head, &poll->wait);
4873 return true;
4874 }
4875
4876 return false;
4877}
4878
Jens Axboed4e7cd32020-08-15 11:44:50 -07004879static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004880{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004881 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004882 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004883 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004884 return req->apoll->double_poll;
4885}
4886
4887static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4888{
4889 if (req->opcode == IORING_OP_POLL_ADD)
4890 return &req->poll;
4891 return &req->apoll->poll;
4892}
4893
4894static void io_poll_remove_double(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004895 __must_hold(&req->ctx->completion_lock)
Jens Axboed4e7cd32020-08-15 11:44:50 -07004896{
4897 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004898
4899 lockdep_assert_held(&req->ctx->completion_lock);
4900
4901 if (poll && poll->head) {
4902 struct wait_queue_head *head = poll->head;
4903
4904 spin_lock(&head->lock);
4905 list_del_init(&poll->wait.entry);
4906 if (poll->wait.private)
Jens Axboede9b4cc2021-02-24 13:28:27 -07004907 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004908 poll->head = NULL;
4909 spin_unlock(&head->lock);
4910 }
4911}
4912
Pavel Begunkove27414b2021-04-09 09:13:20 +01004913static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004914 __must_hold(&req->ctx->completion_lock)
Jens Axboe18bceab2020-05-15 11:56:54 -06004915{
4916 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004917 unsigned flags = IORING_CQE_F_MORE;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004918 int error;
Jens Axboe18bceab2020-05-15 11:56:54 -06004919
Pavel Begunkove27414b2021-04-09 09:13:20 +01004920 if (READ_ONCE(req->poll.canceled)) {
Jens Axboe45ab03b2021-02-23 08:19:33 -07004921 error = -ECANCELED;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004922 req->poll.events |= EPOLLONESHOT;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004923 } else {
Jens Axboe50826202021-02-23 09:02:26 -07004924 error = mangle_poll(mask);
Pavel Begunkove27414b2021-04-09 09:13:20 +01004925 }
Jens Axboeb69de282021-03-17 08:37:41 -06004926 if (req->poll.events & EPOLLONESHOT)
4927 flags = 0;
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01004928 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
Jens Axboe50826202021-02-23 09:02:26 -07004929 io_poll_remove_waitqs(req);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004930 req->poll.done = true;
4931 flags = 0;
4932 }
Hao Xu7b289c32021-04-13 15:20:39 +08004933 if (flags & IORING_CQE_F_MORE)
4934 ctx->cq_extra++;
4935
Jens Axboe18bceab2020-05-15 11:56:54 -06004936 io_commit_cqring(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004937 return !(flags & IORING_CQE_F_MORE);
Jens Axboe18bceab2020-05-15 11:56:54 -06004938}
4939
Jens Axboe18bceab2020-05-15 11:56:54 -06004940static void io_poll_task_func(struct callback_head *cb)
4941{
4942 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004943 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004944 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004945
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004946 if (io_poll_rewait(req, &req->poll)) {
4947 spin_unlock_irq(&ctx->completion_lock);
4948 } else {
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004949 bool done;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004950
Pavel Begunkove27414b2021-04-09 09:13:20 +01004951 done = io_poll_complete(req, req->result);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004952 if (done) {
4953 hash_del(&req->hash_node);
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004954 } else {
Jens Axboe88e41cf2021-02-22 22:08:01 -07004955 req->result = 0;
4956 add_wait_queue(req->poll.head, &req->poll.wait);
4957 }
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004958 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004959 io_cqring_ev_posted(ctx);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004960
Jens Axboe88e41cf2021-02-22 22:08:01 -07004961 if (done) {
4962 nxt = io_put_req_find_next(req);
4963 if (nxt)
4964 __io_req_task_submit(nxt);
4965 }
Pavel Begunkovea1164e2020-06-30 15:20:41 +03004966 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004967}
4968
4969static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4970 int sync, void *key)
4971{
4972 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004973 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004974 __poll_t mask = key_to_poll(key);
4975
4976 /* for instances that support it check for an event match first: */
4977 if (mask && !(mask & poll->events))
4978 return 0;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004979 if (!(poll->events & EPOLLONESHOT))
4980 return poll->wait.func(&poll->wait, mode, sync, key);
Jens Axboe18bceab2020-05-15 11:56:54 -06004981
Jens Axboe8706e042020-09-28 08:38:54 -06004982 list_del_init(&wait->entry);
4983
Jens Axboe807abcb2020-07-17 17:09:27 -06004984 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004985 bool done;
4986
Jens Axboe807abcb2020-07-17 17:09:27 -06004987 spin_lock(&poll->head->lock);
4988 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004989 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004990 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004991 /* make sure double remove sees this as being gone */
4992 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06004993 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06004994 if (!done) {
4995 /* use wait func handler, so it matches the rq type */
4996 poll->wait.func(&poll->wait, mode, sync, key);
4997 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004998 }
Jens Axboede9b4cc2021-02-24 13:28:27 -07004999 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005000 return 1;
5001}
5002
5003static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5004 wait_queue_func_t wake_func)
5005{
5006 poll->head = NULL;
5007 poll->done = false;
5008 poll->canceled = false;
Jens Axboe464dca62021-03-19 14:06:24 -06005009#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5010 /* mask in events that we always want/need */
5011 poll->events = events | IO_POLL_UNMASK;
Jens Axboe18bceab2020-05-15 11:56:54 -06005012 INIT_LIST_HEAD(&poll->wait.entry);
5013 init_waitqueue_func_entry(&poll->wait, wake_func);
5014}
5015
5016static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005017 struct wait_queue_head *head,
5018 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005019{
5020 struct io_kiocb *req = pt->req;
5021
5022 /*
5023 * If poll->head is already set, it's because the file being polled
5024 * uses multiple waitqueues for poll handling (e.g. one for read, one
5025 * for write). Set up a separate io_poll_iocb if this happens.
5026 */
5027 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005028 struct io_poll_iocb *poll_one = poll;
5029
Jens Axboe18bceab2020-05-15 11:56:54 -06005030 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005031 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005032 pt->error = -EINVAL;
5033 return;
5034 }
Jens Axboeea6a693d2021-04-15 09:47:13 -06005035 /*
5036 * Can't handle multishot for double wait for now, turn it
5037 * into one-shot mode.
5038 */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005039 if (!(poll_one->events & EPOLLONESHOT))
5040 poll_one->events |= EPOLLONESHOT;
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005041 /* double add on the same waitqueue head, ignore */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005042 if (poll_one->head == head)
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005043 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005044 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5045 if (!poll) {
5046 pt->error = -ENOMEM;
5047 return;
5048 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005049 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboede9b4cc2021-02-24 13:28:27 -07005050 req_ref_get(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005051 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005052 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005053 }
5054
5055 pt->error = 0;
5056 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005057
5058 if (poll->events & EPOLLEXCLUSIVE)
5059 add_wait_queue_exclusive(head, &poll->wait);
5060 else
5061 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005062}
5063
5064static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5065 struct poll_table_struct *p)
5066{
5067 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005068 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005069
Jens Axboe807abcb2020-07-17 17:09:27 -06005070 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005071}
5072
Jens Axboed7718a92020-02-14 22:23:12 -07005073static void io_async_task_func(struct callback_head *cb)
5074{
5075 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5076 struct async_poll *apoll = req->apoll;
5077 struct io_ring_ctx *ctx = req->ctx;
5078
5079 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5080
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005081 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005082 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005083 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005084 }
5085
Pavel Begunkov0ea13b42021-04-09 09:13:21 +01005086 hash_del(&req->hash_node);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005087 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005088 spin_unlock_irq(&ctx->completion_lock);
5089
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005090 if (!READ_ONCE(apoll->poll.canceled))
5091 __io_req_task_submit(req);
5092 else
Pavel Begunkov25935532021-03-19 17:22:40 +00005093 io_req_complete_failed(req, -ECANCELED);
Jens Axboed7718a92020-02-14 22:23:12 -07005094}
5095
5096static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5097 void *key)
5098{
5099 struct io_kiocb *req = wait->private;
5100 struct io_poll_iocb *poll = &req->apoll->poll;
5101
5102 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5103 key_to_poll(key));
5104
5105 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5106}
5107
5108static void io_poll_req_insert(struct io_kiocb *req)
5109{
5110 struct io_ring_ctx *ctx = req->ctx;
5111 struct hlist_head *list;
5112
5113 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5114 hlist_add_head(&req->hash_node, list);
5115}
5116
5117static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5118 struct io_poll_iocb *poll,
5119 struct io_poll_table *ipt, __poll_t mask,
5120 wait_queue_func_t wake_func)
5121 __acquires(&ctx->completion_lock)
5122{
5123 struct io_ring_ctx *ctx = req->ctx;
5124 bool cancel = false;
5125
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005126 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005127 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005128 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005129 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005130
5131 ipt->pt._key = mask;
5132 ipt->req = req;
5133 ipt->error = -EINVAL;
5134
Jens Axboed7718a92020-02-14 22:23:12 -07005135 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5136
5137 spin_lock_irq(&ctx->completion_lock);
5138 if (likely(poll->head)) {
5139 spin_lock(&poll->head->lock);
5140 if (unlikely(list_empty(&poll->wait.entry))) {
5141 if (ipt->error)
5142 cancel = true;
5143 ipt->error = 0;
5144 mask = 0;
5145 }
Jens Axboe88e41cf2021-02-22 22:08:01 -07005146 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
Jens Axboed7718a92020-02-14 22:23:12 -07005147 list_del_init(&poll->wait.entry);
5148 else if (cancel)
5149 WRITE_ONCE(poll->canceled, true);
5150 else if (!poll->done) /* actually waiting for an event */
5151 io_poll_req_insert(req);
5152 spin_unlock(&poll->head->lock);
5153 }
5154
5155 return mask;
5156}
5157
5158static bool io_arm_poll_handler(struct io_kiocb *req)
5159{
5160 const struct io_op_def *def = &io_op_defs[req->opcode];
5161 struct io_ring_ctx *ctx = req->ctx;
5162 struct async_poll *apoll;
5163 struct io_poll_table ipt;
5164 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005165 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005166
5167 if (!req->file || !file_can_poll(req->file))
5168 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005169 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005170 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005171 if (def->pollin)
5172 rw = READ;
5173 else if (def->pollout)
5174 rw = WRITE;
5175 else
5176 return false;
5177 /* if we can't do a nonblocking attempt, there's no point arming a poll handler */
Jens Axboe7b29f922021-03-12 08:30:14 -07005178 if (!io_file_supports_async(req, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005179 return false;
5180
5181 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5182 if (unlikely(!apoll))
5183 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005184 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005185
5186 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005187 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005188
Jens Axboe88e41cf2021-02-22 22:08:01 -07005189 mask = EPOLLONESHOT;
Jens Axboed7718a92020-02-14 22:23:12 -07005190 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005191 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005192 if (def->pollout)
5193 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005194
5195 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5196 if ((req->opcode == IORING_OP_RECVMSG) &&
5197 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5198 mask &= ~POLLIN;
5199
Jens Axboed7718a92020-02-14 22:23:12 -07005200 mask |= POLLERR | POLLPRI;
5201
5202 ipt.pt._qproc = io_async_queue_proc;
5203
5204 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5205 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005206 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005207 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005208 spin_unlock_irq(&ctx->completion_lock);
Jens Axboed7718a92020-02-14 22:23:12 -07005209 return false;
5210 }
5211 spin_unlock_irq(&ctx->completion_lock);
5212 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5213 apoll->poll.events);
5214 return true;
5215}
5216
5217static bool __io_poll_remove_one(struct io_kiocb *req,
Jens Axboeb2e720a2021-03-31 09:03:03 -06005218 struct io_poll_iocb *poll, bool do_cancel)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005219 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005220{
Jens Axboeb41e9852020-02-17 09:52:41 -07005221 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005222
Jens Axboe50826202021-02-23 09:02:26 -07005223 if (!poll->head)
5224 return false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005225 spin_lock(&poll->head->lock);
Jens Axboeb2e720a2021-03-31 09:03:03 -06005226 if (do_cancel)
5227 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005228 if (!list_empty(&poll->wait.entry)) {
5229 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005230 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005231 }
5232 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005233 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005234 return do_complete;
5235}
5236
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005237static bool io_poll_remove_waitqs(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005238 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005239{
5240 bool do_complete;
5241
Jens Axboed4e7cd32020-08-15 11:44:50 -07005242 io_poll_remove_double(req);
Pavel Begunkove31001a2021-04-13 02:58:43 +01005243 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005244
Pavel Begunkove31001a2021-04-13 02:58:43 +01005245 if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005246 /* non-poll requests have submit ref still */
Pavel Begunkove31001a2021-04-13 02:58:43 +01005247 req_ref_put(req);
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005248 }
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005249 return do_complete;
5250}
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005251
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005252static bool io_poll_remove_one(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005253 __must_hold(&req->ctx->completion_lock)
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005254{
5255 bool do_complete;
5256
5257 do_complete = io_poll_remove_waitqs(req);
Jens Axboeb41e9852020-02-17 09:52:41 -07005258 if (do_complete) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005259 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
Jens Axboeb41e9852020-02-17 09:52:41 -07005260 io_commit_cqring(req->ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005261 req_set_fail(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005262 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005263 }
5264
5265 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005266}
5267
Jens Axboe76e1b642020-09-26 15:05:03 -06005268/*
5269 * Returns true if we found and killed one or more poll requests
5270 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005271static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005272 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005273{
Jens Axboe78076bb2019-12-04 19:56:40 -07005274 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005275 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005276 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005277
5278 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005279 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5280 struct hlist_head *list;
5281
5282 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005283 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005284 if (io_match_task(req, tsk, cancel_all))
Jens Axboef3606e32020-09-22 08:18:24 -06005285 posted += io_poll_remove_one(req);
5286 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005287 }
5288 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005289
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005290 if (posted)
5291 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005292
5293 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005294}
5295
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005296static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5297 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005298 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005299{
Jens Axboe78076bb2019-12-04 19:56:40 -07005300 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005301 struct io_kiocb *req;
5302
Jens Axboe78076bb2019-12-04 19:56:40 -07005303 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5304 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005305 if (sqe_addr != req->user_data)
5306 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005307 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5308 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005309 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005310 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005311 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07005312}
5313
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005314static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5315 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005316 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005317{
5318 struct io_kiocb *req;
5319
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005320 req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005321 if (!req)
5322 return -ENOENT;
5323 if (io_poll_remove_one(req))
5324 return 0;
5325
5326 return -EALREADY;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005327}
5328
Pavel Begunkov9096af32021-04-14 13:38:36 +01005329static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5330 unsigned int flags)
5331{
5332 u32 events;
5333
5334 events = READ_ONCE(sqe->poll32_events);
5335#ifdef __BIG_ENDIAN
5336 events = swahw32(events);
5337#endif
5338 if (!(flags & IORING_POLL_ADD_MULTI))
5339 events |= EPOLLONESHOT;
5340 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5341}
5342
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005343static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005344 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005345{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005346 struct io_poll_update *upd = &req->poll_update;
5347 u32 flags;
5348
Jens Axboe221c5eb2019-01-17 09:41:58 -07005349 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5350 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005351 if (sqe->ioprio || sqe->buf_index)
5352 return -EINVAL;
5353 flags = READ_ONCE(sqe->len);
5354 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5355 IORING_POLL_ADD_MULTI))
5356 return -EINVAL;
5357 /* meaningless without update */
5358 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005359 return -EINVAL;
5360
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005361 upd->old_user_data = READ_ONCE(sqe->addr);
5362 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5363 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005364
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005365 upd->new_user_data = READ_ONCE(sqe->off);
5366 if (!upd->update_user_data && upd->new_user_data)
5367 return -EINVAL;
5368 if (upd->update_events)
5369 upd->events = io_poll_parse_events(sqe, flags);
5370 else if (sqe->poll32_events)
5371 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005372
Jens Axboe221c5eb2019-01-17 09:41:58 -07005373 return 0;
5374}
5375
Jens Axboe221c5eb2019-01-17 09:41:58 -07005376static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5377 void *key)
5378{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005379 struct io_kiocb *req = wait->private;
5380 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005381
Jens Axboed7718a92020-02-14 22:23:12 -07005382 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005383}
5384
Jens Axboe221c5eb2019-01-17 09:41:58 -07005385static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5386 struct poll_table_struct *p)
5387{
5388 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5389
Jens Axboee8c2bc12020-08-15 18:44:09 -07005390 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005391}
5392
Jens Axboe3529d8c2019-12-19 18:24:38 -07005393static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005394{
5395 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005396 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005397
5398 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5399 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005400 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005401 return -EINVAL;
5402 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005403 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005404 return -EINVAL;
5405
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005406 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005407 return 0;
5408}
5409
Pavel Begunkov61e98202021-02-10 00:03:08 +00005410static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005411{
5412 struct io_poll_iocb *poll = &req->poll;
5413 struct io_ring_ctx *ctx = req->ctx;
5414 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005415 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005416
Jens Axboed7718a92020-02-14 22:23:12 -07005417 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005418
Jens Axboed7718a92020-02-14 22:23:12 -07005419 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5420 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005421
Jens Axboe8c838782019-03-12 15:48:16 -06005422 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005423 ipt.error = 0;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005424 io_poll_complete(req, mask);
Jens Axboe8c838782019-03-12 15:48:16 -06005425 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005426 spin_unlock_irq(&ctx->completion_lock);
5427
Jens Axboe8c838782019-03-12 15:48:16 -06005428 if (mask) {
5429 io_cqring_ev_posted(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005430 if (poll->events & EPOLLONESHOT)
5431 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005432 }
Jens Axboe8c838782019-03-12 15:48:16 -06005433 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005434}
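/*
 * Illustrative userspace sketch, not part of this file: arming
 * IORING_OP_POLL_ADD as served by io_poll_add() above, either one-shot
 * (the default) or multishot via IORING_POLL_ADD_MULTI in sqe->len.
 * Requires <liburing.h> and <poll.h>; newer liburing also offers
 * io_uring_prep_poll_multishot(), the raw flag is used here to stay
 * version-neutral.
 */
static void example_poll_add(struct io_uring *ring, int fd, int multishot)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_poll_add(sqe, fd, POLLIN);
	if (multishot)
		sqe->len |= IORING_POLL_ADD_MULTI;
	io_uring_sqe_set_data(sqe, (void *)(unsigned long)fd);
	io_uring_submit(ring);
	/*
	 * Each readiness event posts a CQE; in multishot mode the CQE carries
	 * IORING_CQE_F_MORE for as long as the poll request stays armed.
	 */
}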
5435
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005436static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06005437{
5438 struct io_ring_ctx *ctx = req->ctx;
5439 struct io_kiocb *preq;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005440 bool completing;
Jens Axboeb69de282021-03-17 08:37:41 -06005441 int ret;
5442
5443 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005444 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Jens Axboeb69de282021-03-17 08:37:41 -06005445 if (!preq) {
5446 ret = -ENOENT;
5447 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005448 }
Jens Axboecb3b200e2021-04-06 09:49:31 -06005449
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005450 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5451 completing = true;
5452 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5453 goto err;
5454 }
5455
Jens Axboecb3b200e2021-04-06 09:49:31 -06005456 /*
5457 * Don't allow racy completion with singleshot, as we cannot safely
5458 * update those. For multishot, if we're racing with completion, just
5459 * let completion re-add it.
5460 */
5461 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5462 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5463 ret = -EALREADY;
5464 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005465 }
5466 /* we now have a detached poll request. reissue. */
5467 ret = 0;
5468err:
Jens Axboeb69de282021-03-17 08:37:41 -06005469 if (ret < 0) {
Jens Axboecb3b200e2021-04-06 09:49:31 -06005470 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005471 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06005472 io_req_complete(req, ret);
5473 return 0;
5474 }
5475 /* only mask one event flags, keep behavior flags */
Pavel Begunkov9d805892021-04-13 02:58:40 +01005476 if (req->poll_update.update_events) {
Jens Axboeb69de282021-03-17 08:37:41 -06005477 preq->poll.events &= ~0xffff;
Pavel Begunkov9d805892021-04-13 02:58:40 +01005478 preq->poll.events |= req->poll_update.events & 0xffff;
Jens Axboeb69de282021-03-17 08:37:41 -06005479 preq->poll.events |= IO_POLL_UNMASK;
5480 }
Pavel Begunkov9d805892021-04-13 02:58:40 +01005481 if (req->poll_update.update_user_data)
5482 preq->user_data = req->poll_update.new_user_data;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005483 spin_unlock_irq(&ctx->completion_lock);
5484
Jens Axboeb69de282021-03-17 08:37:41 -06005485 /* complete update request, we're done with it */
5486 io_req_complete(req, ret);
5487
Jens Axboecb3b200e2021-04-06 09:49:31 -06005488 if (!completing) {
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005489 ret = io_poll_add(preq, issue_flags);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005490 if (ret < 0) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005491 req_set_fail(preq);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005492 io_req_complete(preq, ret);
5493 }
Jens Axboeb69de282021-03-17 08:37:41 -06005494 }
5495 return 0;
5496}
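/*
 * Illustrative userspace sketch, not part of this file: updating an armed
 * poll request through IORING_OP_POLL_REMOVE with the update flags parsed by
 * io_poll_update_prep() above. Requires <liburing.h> and <poll.h>; the
 * prototype of io_uring_prep_poll_update() has changed across liburing
 * releases, so the argument types below are an assumption.
 */
static void example_poll_update(struct io_uring *ring,
				unsigned long long old_udata,
				unsigned long long new_udata)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* switch the poll mask to POLLOUT and assign a new user_data */
	io_uring_prep_poll_update(sqe, old_udata, new_udata, POLLOUT,
				  IORING_POLL_UPDATE_EVENTS |
				  IORING_POLL_UPDATE_USER_DATA);
	io_uring_submit(ring);
	/* the CQE res is 0 on success, -ENOENT or -EALREADY otherwise */
}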
5497
Jens Axboe5262f562019-09-17 12:26:57 -06005498static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5499{
Jens Axboead8a48a2019-11-15 08:49:11 -07005500 struct io_timeout_data *data = container_of(timer,
5501 struct io_timeout_data, timer);
5502 struct io_kiocb *req = data->req;
5503 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005504 unsigned long flags;
5505
Jens Axboe5262f562019-09-17 12:26:57 -06005506 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005507 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005508 atomic_set(&req->ctx->cq_timeouts,
5509 atomic_read(&req->ctx->cq_timeouts) + 1);
5510
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005511 io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
Jens Axboe5262f562019-09-17 12:26:57 -06005512 io_commit_cqring(ctx);
5513 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5514
5515 io_cqring_ev_posted(ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005516 req_set_fail(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005517 io_put_req(req);
5518 return HRTIMER_NORESTART;
5519}
5520
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005521static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5522 __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005523 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005524{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005525 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005526 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005527 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06005528
5529 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005530 found = user_data == req->user_data;
5531 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06005532 break;
Jens Axboef254ac02020-08-12 17:33:30 -06005533 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005534 if (!found)
5535 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06005536
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005537 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005538 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005539 return ERR_PTR(-EALREADY);
5540 list_del_init(&req->timeout.list);
5541 return req;
5542}
5543
5544static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005545 __must_hold(&ctx->completion_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005546{
5547 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5548
5549 if (IS_ERR(req))
5550 return PTR_ERR(req);
5551
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005552 req_set_fail(req);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005553 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005554 io_put_req_deferred(req, 1);
5555 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005556}
5557
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005558static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5559 struct timespec64 *ts, enum hrtimer_mode mode)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005560 __must_hold(&ctx->completion_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005561{
5562 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5563 struct io_timeout_data *data;
5564
5565 if (IS_ERR(req))
5566 return PTR_ERR(req);
5567
5568 req->timeout.off = 0; /* noseq */
5569 data = req->async_data;
5570 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5571 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5572 data->timer.function = io_timeout_fn;
5573 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5574 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005575}
5576
Jens Axboe3529d8c2019-12-19 18:24:38 -07005577static int io_timeout_remove_prep(struct io_kiocb *req,
5578 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005579{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005580 struct io_timeout_rem *tr = &req->timeout_rem;
5581
Jens Axboeb29472e2019-12-17 18:50:29 -07005582 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5583 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005584 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5585 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005586 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005587 return -EINVAL;
5588
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005589 tr->addr = READ_ONCE(sqe->addr);
5590 tr->flags = READ_ONCE(sqe->timeout_flags);
5591 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5592 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5593 return -EINVAL;
5594 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5595 return -EFAULT;
5596 } else if (tr->flags) {
5597 /* timeout removal doesn't support flags */
5598 return -EINVAL;
5599 }
5600
Jens Axboeb29472e2019-12-17 18:50:29 -07005601 return 0;
5602}
5603
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005604static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5605{
5606 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5607 : HRTIMER_MODE_REL;
5608}
5609
Jens Axboe11365042019-10-16 09:08:32 -06005610/*
5611 * Remove or update an existing timeout command
5612 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005613static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005614{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005615 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005616 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005617 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005618
Jens Axboe11365042019-10-16 09:08:32 -06005619 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005620 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005621 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005622 else
5623 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5624 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005625
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005626 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06005627 io_commit_cqring(ctx);
5628 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005629 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005630 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005631 req_set_fail(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005632 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005633 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005634}
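/*
 * Illustrative userspace sketch, not part of this file: cancelling or
 * re-arming a pending timeout via IORING_OP_TIMEOUT_REMOVE, the opcode
 * served by io_timeout_remove() above. Requires <liburing.h>; assumes a
 * liburing recent enough to provide io_uring_prep_timeout_update().
 */
static void example_timeout_remove_or_update(struct io_uring *ring,
					     unsigned long long timeout_udata,
					     struct __kernel_timespec *new_ts)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!new_ts) {
		/* removal: the target timeout completes with -ECANCELED */
		io_uring_prep_timeout_remove(sqe, timeout_udata, 0);
	} else {
		/* IORING_TIMEOUT_UPDATE: keep the timeout, change its expiry */
		io_uring_prep_timeout_update(sqe, new_ts, timeout_udata, 0);
	}
	io_uring_submit(ring);
}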
5635
Jens Axboe3529d8c2019-12-19 18:24:38 -07005636static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005637 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005638{
Jens Axboead8a48a2019-11-15 08:49:11 -07005639 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005640 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005641 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005642
Jens Axboead8a48a2019-11-15 08:49:11 -07005643 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005644 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005645 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005646 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005647 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005648 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005649 flags = READ_ONCE(sqe->timeout_flags);
5650 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005651 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005652
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005653 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005654
Jens Axboee8c2bc12020-08-15 18:44:09 -07005655 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005656 return -ENOMEM;
5657
Jens Axboee8c2bc12020-08-15 18:44:09 -07005658 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005659 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005660
5661 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005662 return -EFAULT;
5663
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005664 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005665 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkov2482b582021-03-25 18:32:44 +00005666 if (is_timeout_link)
5667 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005668 return 0;
5669}
5670
Pavel Begunkov61e98202021-02-10 00:03:08 +00005671static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005672{
Jens Axboead8a48a2019-11-15 08:49:11 -07005673 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005674 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005675 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005676 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005677
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005678 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005679
Jens Axboe5262f562019-09-17 12:26:57 -06005680 /*
5681 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005682 * timeout event to be satisfied. If it isn't set, then this is
5683 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005684 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005685 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005686 entry = ctx->timeout_list.prev;
5687 goto add;
5688 }
Jens Axboe5262f562019-09-17 12:26:57 -06005689
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005690 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5691 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005692
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005693 /* Update the last seq here in case io_flush_timeouts() hasn't.
5694 * This is safe because ->completion_lock is held, and submissions
5695 * and completions are never mixed in the same ->completion_lock section.
5696 */
5697 ctx->cq_last_tm_flush = tail;
5698
Jens Axboe5262f562019-09-17 12:26:57 -06005699 /*
5700 * Insertion sort, ensuring the first entry in the list is always
5701 * the one we need first.
5702 */
Jens Axboe5262f562019-09-17 12:26:57 -06005703 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005704 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5705 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005706
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005707 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005708 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005709 /* nxt.seq is behind @tail, otherwise would've been completed */
5710 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005711 break;
5712 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005713add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005714 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005715 data->timer.function = io_timeout_fn;
5716 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005717 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005718 return 0;
5719}
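/*
 * Illustrative userspace sketch, not part of this file: an IORING_OP_TIMEOUT
 * whose count (sqe->off) asks it to fire only after N other completions,
 * matching the sequence handling in io_timeout() above. Requires
 * <liburing.h>; the one-second expiry is an arbitrary example value.
 */
static void example_timeout(struct io_uring *ring, unsigned int count)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/*
	 * Completes with -ETIME once the timer fires, or with 0 as soon as
	 * 'count' other requests have completed; count == 0 makes it a pure
	 * timeout.
	 */
	io_uring_prep_timeout(sqe, &ts, count, 0);
	io_uring_submit(ring);
}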
5720
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005721struct io_cancel_data {
5722 struct io_ring_ctx *ctx;
5723 u64 user_data;
5724};
5725
Jens Axboe62755e32019-10-28 21:49:21 -06005726static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005727{
Jens Axboe62755e32019-10-28 21:49:21 -06005728 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005729 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005730
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005731 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005732}
5733
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005734static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5735 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005736{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005737 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005738 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005739 int ret = 0;
5740
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005741 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005742 return -ENOENT;
5743
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005744 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005745 switch (cancel_ret) {
5746 case IO_WQ_CANCEL_OK:
5747 ret = 0;
5748 break;
5749 case IO_WQ_CANCEL_RUNNING:
5750 ret = -EALREADY;
5751 break;
5752 case IO_WQ_CANCEL_NOTFOUND:
5753 ret = -ENOENT;
5754 break;
5755 }
5756
Jens Axboee977d6d2019-11-05 12:39:45 -07005757 return ret;
5758}
5759
Jens Axboe47f46762019-11-09 17:43:02 -07005760static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5761 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005762 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005763{
5764 unsigned long flags;
5765 int ret;
5766
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005767 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005768 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01005769 if (ret != -ENOENT)
5770 goto done;
Jens Axboe47f46762019-11-09 17:43:02 -07005771 ret = io_timeout_cancel(ctx, sqe_addr);
5772 if (ret != -ENOENT)
5773 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005774 ret = io_poll_cancel(ctx, sqe_addr, false);
Jens Axboe47f46762019-11-09 17:43:02 -07005775done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005776 if (!ret)
5777 ret = success_ret;
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005778 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Jens Axboe47f46762019-11-09 17:43:02 -07005779 io_commit_cqring(ctx);
5780 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5781 io_cqring_ev_posted(ctx);
5782
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005783 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005784 req_set_fail(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005785}
5786
Jens Axboe3529d8c2019-12-19 18:24:38 -07005787static int io_async_cancel_prep(struct io_kiocb *req,
5788 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005789{
Jens Axboefbf23842019-12-17 18:45:56 -07005790 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005791 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005792 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5793 return -EINVAL;
5794 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005795 return -EINVAL;
5796
Jens Axboefbf23842019-12-17 18:45:56 -07005797 req->cancel.addr = READ_ONCE(sqe->addr);
5798 return 0;
5799}
5800
Pavel Begunkov61e98202021-02-10 00:03:08 +00005801static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005802{
5803 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005804 u64 sqe_addr = req->cancel.addr;
5805 struct io_tctx_node *node;
5806 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005807
Pavel Begunkov58f99372021-03-12 16:25:55 +00005808 /* tasks should wait for their io-wq threads, so safe w/o sync */
5809 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5810 spin_lock_irq(&ctx->completion_lock);
5811 if (ret != -ENOENT)
5812 goto done;
5813 ret = io_timeout_cancel(ctx, sqe_addr);
5814 if (ret != -ENOENT)
5815 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005816 ret = io_poll_cancel(ctx, sqe_addr, false);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005817 if (ret != -ENOENT)
5818 goto done;
5819 spin_unlock_irq(&ctx->completion_lock);
5820
5821 /* slow path, try all io-wq's */
5822 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5823 ret = -ENOENT;
5824 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5825 struct io_uring_task *tctx = node->task->io_uring;
5826
Pavel Begunkov58f99372021-03-12 16:25:55 +00005827 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5828 if (ret != -ENOENT)
5829 break;
5830 }
5831 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5832
5833 spin_lock_irq(&ctx->completion_lock);
5834done:
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005835 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005836 io_commit_cqring(ctx);
5837 spin_unlock_irq(&ctx->completion_lock);
5838 io_cqring_ev_posted(ctx);
5839
5840 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005841 req_set_fail(req);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005842 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005843 return 0;
5844}
5845
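/*
 * Illustrative userspace sketch, not from this file: filling an
 * IORING_OP_ASYNC_CANCEL SQE for the handler above. io_async_cancel_prep()
 * reads only sqe->addr (the user_data of the request to cancel) and
 * returns -EINVAL if ioprio, off, len or cancel_flags are set, which a
 * zeroed SQE satisfies. The helper name and the 0xcafe tag are made up.
 */
#include <string.h>
#include <linux/io_uring.h>

static void prep_async_cancel(struct io_uring_sqe *sqe, __u64 target_user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = target_user_data;	/* becomes req->cancel.addr */
	sqe->user_data = 0xcafe;	/* tags the cancel's own CQE */
}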
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005846static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005847 const struct io_uring_sqe *sqe)
5848{
Daniele Albano61710e42020-07-18 14:15:16 -06005849 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5850 return -EINVAL;
5851 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005852 return -EINVAL;
5853
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005854 req->rsrc_update.offset = READ_ONCE(sqe->off);
5855 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5856 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005857 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005858 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005859 return 0;
5860}
5861
Pavel Begunkov889fca72021-02-10 00:03:09 +00005862static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005863{
5864 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01005865 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005866 int ret;
5867
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005868 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005869 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005870
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005871 up.offset = req->rsrc_update.offset;
5872 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01005873 up.nr = 0;
5874 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01005875 up.resv = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005876
5877 mutex_lock(&ctx->uring_lock);
Pavel Begunkovfdecb662021-04-25 14:32:20 +01005878 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01005879 &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005880 mutex_unlock(&ctx->uring_lock);
5881
5882 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005883 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005884 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005885 return 0;
5886}
5887
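/*
 * Illustrative userspace sketch, not from this file: an
 * IORING_OP_FILES_UPDATE SQE for the handler above. Per
 * io_rsrc_update_prep(), off is the offset into the registered file
 * table, len the number of entries, and addr the array of fds (an fd of
 * -1 clears a slot). The helper name is made up for the example.
 */
#include <string.h>
#include <linux/io_uring.h>

static void prep_files_update(struct io_uring_sqe *sqe, int *fds,
			      unsigned int nr, unsigned int table_off)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FILES_UPDATE;
	sqe->addr = (unsigned long) fds;
	sqe->len = nr;		/* must be non-zero, see the prep above */
	sqe->off = table_off;
}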
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005888static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005889{
Jens Axboed625c6e2019-12-17 19:53:05 -07005890 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005891 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005892 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005893 case IORING_OP_READV:
5894 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005895 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005896 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005897 case IORING_OP_WRITEV:
5898 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005899 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005900 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005901 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005902 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005903 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005904 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005905 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005906 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005907 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005908 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005909 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005910 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005911 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005912 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005913 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005914 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005915 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005916 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005917 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005918 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005919 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005920 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005921 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005922 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005923 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005924 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005925 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005926 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005927 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005928 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005929 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005930 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005931 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005932 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005933 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005934 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005935 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005936 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005937 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005938 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005939 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005940 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005941 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005942 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005943 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005944 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005945 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005946 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005947 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005948 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005949 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005950 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005951 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005952 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005953 case IORING_OP_SHUTDOWN:
5954 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005955 case IORING_OP_RENAMEAT:
5956 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005957 case IORING_OP_UNLINKAT:
5958 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005959 }
5960
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005961 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5962 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01005963 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005964}
5965
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005966static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005967{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005968 if (!io_op_defs[req->opcode].needs_async_setup)
5969 return 0;
5970 if (WARN_ON_ONCE(req->async_data))
5971 return -EFAULT;
5972 if (io_alloc_async_data(req))
5973 return -EAGAIN;
5974
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005975 switch (req->opcode) {
5976 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005977 return io_rw_prep_async(req, READ);
5978 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005979 return io_rw_prep_async(req, WRITE);
5980 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005981 return io_sendmsg_prep_async(req);
5982 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005983 return io_recvmsg_prep_async(req);
5984 case IORING_OP_CONNECT:
5985 return io_connect_prep_async(req);
5986 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005987 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
5988 req->opcode);
5989 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07005990}
5991
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005992static u32 io_get_sequence(struct io_kiocb *req)
5993{
5994 struct io_kiocb *pos;
5995 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov15641e42021-06-14 23:37:24 +01005996 u32 nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005997
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005998 io_for_each_link(pos, req)
5999 nr_reqs++;
Pavel Begunkov15641e42021-06-14 23:37:24 +01006000 return ctx->cached_sq_head - nr_reqs;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006001}
6002
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006003static int io_req_defer(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006004{
6005 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006006 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07006007 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006008 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07006009
 6010	/* Still need to defer if there is a pending req in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006011 if (likely(list_empty_careful(&ctx->defer_list) &&
6012 !(req->flags & REQ_F_IO_DRAIN)))
6013 return 0;
6014
6015 seq = io_get_sequence(req);
6016 /* Still a chance to pass the sequence check */
6017 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboedef596e2019-01-09 08:59:42 -07006018 return 0;
6019
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006020 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006021 if (ret)
6022 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006023 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006024 de = kmalloc(sizeof(*de), GFP_KERNEL);
6025 if (!de)
6026 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07006027
6028 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006029 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07006030 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006031 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03006032 io_queue_async_work(req);
6033 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07006034 }
6035
6036 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006037 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006038 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006039 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07006040 spin_unlock_irq(&ctx->completion_lock);
6041 return -EIOCBQUEUED;
6042}
6043
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006044static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006045{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006046 if (req->flags & REQ_F_BUFFER_SELECTED) {
6047 switch (req->opcode) {
6048 case IORING_OP_READV:
6049 case IORING_OP_READ_FIXED:
6050 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006051 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006052 break;
6053 case IORING_OP_RECVMSG:
6054 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006055 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006056 break;
6057 }
6058 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006059 }
6060
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006061 if (req->flags & REQ_F_NEED_CLEANUP) {
6062 switch (req->opcode) {
6063 case IORING_OP_READV:
6064 case IORING_OP_READ_FIXED:
6065 case IORING_OP_READ:
6066 case IORING_OP_WRITEV:
6067 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006068 case IORING_OP_WRITE: {
6069 struct io_async_rw *io = req->async_data;
6070 if (io->free_iovec)
6071 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006072 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006073 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006074 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006075 case IORING_OP_SENDMSG: {
6076 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006077
6078 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006079 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006080 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006081 case IORING_OP_SPLICE:
6082 case IORING_OP_TEE:
Pavel Begunkove1d767f2021-03-19 17:22:43 +00006083 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6084 io_put_file(req->splice.file_in);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006085 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006086 case IORING_OP_OPENAT:
6087 case IORING_OP_OPENAT2:
6088 if (req->open.filename)
6089 putname(req->open.filename);
6090 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006091 case IORING_OP_RENAMEAT:
6092 putname(req->rename.oldpath);
6093 putname(req->rename.newpath);
6094 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006095 case IORING_OP_UNLINKAT:
6096 putname(req->unlink.filename);
6097 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006098 }
6099 req->flags &= ~REQ_F_NEED_CLEANUP;
6100 }
Jens Axboe75652a302021-04-15 09:52:40 -06006101 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6102 kfree(req->apoll->double_poll);
6103 kfree(req->apoll);
6104 req->apoll = NULL;
6105 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006106 if (req->flags & REQ_F_INFLIGHT) {
6107 struct io_uring_task *tctx = req->task->io_uring;
6108
6109 atomic_dec(&tctx->inflight_tracked);
6110 req->flags &= ~REQ_F_INFLIGHT;
6111 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006112}
6113
Pavel Begunkov889fca72021-02-10 00:03:09 +00006114static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006115{
Jens Axboeedafcce2019-01-09 09:16:05 -07006116 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006117 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006118 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006119
Jens Axboe003e8dc2021-03-06 09:22:27 -07006120 if (req->work.creds && req->work.creds != current_cred())
6121 creds = override_creds(req->work.creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006122
Jens Axboed625c6e2019-12-17 19:53:05 -07006123 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006124 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006125 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006126 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006127 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006128 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006129 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006130 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006131 break;
6132 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006133 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006134 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006135 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006136 break;
6137 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006138 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006139 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006140 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006141 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006142 break;
6143 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006144 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006145 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006146 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006147 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006148 break;
6149 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006150 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006151 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006152 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006153 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006154 break;
6155 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006156 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006157 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006158 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006159 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006160 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006161 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006162 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006163 break;
6164 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006165 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006166 break;
6167 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006168 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006169 break;
6170 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006171 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006172 break;
6173 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006174 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006175 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006176 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006177 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006178 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006179 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006180 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006181 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006182 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006183 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006184 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006185 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006186 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006187 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006188 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006189 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006190 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006191 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006192 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006193 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006194 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006195 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006196 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006197 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006198 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006199 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006200 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006201 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006202 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006203 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006204 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006205 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006206 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006207 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006208 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006209 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006210 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006211 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006212 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006213 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006214 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006215 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006216 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006217 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006218 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006219 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006220 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006221 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006222 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006223 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006224 default:
6225 ret = -EINVAL;
6226 break;
6227 }
Jens Axboe31b51512019-01-18 22:56:34 -07006228
Jens Axboe5730b272021-02-27 15:57:30 -07006229 if (creds)
6230 revert_creds(creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006231 if (ret)
6232 return ret;
Jens Axboeb5325762020-05-19 21:20:27 -06006233 /* If the op doesn't have a file, we're not polling for it */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01006234 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6235 io_iopoll_req_issued(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006236
6237 return 0;
6238}
6239
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006240static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006241{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006242 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006243 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006244 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006245
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006246 timeout = io_prep_linked_timeout(req);
6247 if (timeout)
6248 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006249
Jens Axboe4014d942021-01-19 15:53:54 -07006250 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006251 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006252
Jens Axboe561fb042019-10-24 07:25:42 -06006253 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006254 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006255 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006256 /*
6257 * We can get EAGAIN for polled IO even though we're
6258 * forcing a sync submission from here, since we can't
6259 * wait for request slots on the block side.
6260 */
6261 if (ret != -EAGAIN)
6262 break;
6263 cond_resched();
6264 } while (1);
6265 }
Jens Axboe31b51512019-01-18 22:56:34 -07006266
Pavel Begunkova3df76982021-02-18 22:32:52 +00006267 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006268 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006269 /* io-wq is going to take one down */
Jens Axboede9b4cc2021-02-24 13:28:27 -07006270 req_ref_get(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00006271 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006272 }
Jens Axboe31b51512019-01-18 22:56:34 -07006273}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006274
Jens Axboe7b29f922021-03-12 08:30:14 -07006275#define FFS_ASYNC_READ 0x1UL
6276#define FFS_ASYNC_WRITE 0x2UL
6277#ifdef CONFIG_64BIT
6278#define FFS_ISREG 0x4UL
6279#else
6280#define FFS_ISREG 0x0UL
6281#endif
6282#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
6283
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006284static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006285 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006286{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006287 struct io_fixed_file *table_l2;
Jens Axboe65e19f52019-10-26 07:20:21 -06006288
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006289 table_l2 = table->files[i >> IORING_FILE_TABLE_SHIFT];
6290 return &table_l2[i & IORING_FILE_TABLE_MASK];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006291}
6292
Jens Axboe09bb8392019-03-13 12:39:28 -06006293static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6294 int index)
6295{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006296 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006297
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006298 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006299}
6300
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006301static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006302{
6303 unsigned long file_ptr = (unsigned long) file;
6304
6305 if (__io_file_supports_async(file, READ))
6306 file_ptr |= FFS_ASYNC_READ;
6307 if (__io_file_supports_async(file, WRITE))
6308 file_ptr |= FFS_ASYNC_WRITE;
6309 if (S_ISREG(file_inode(file)->i_mode))
6310 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006311 file_slot->file_ptr = file_ptr;
Jens Axboe09bb8392019-03-13 12:39:28 -06006312}
6313
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006314static struct file *io_file_get(struct io_submit_state *state,
6315 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006316{
6317 struct io_ring_ctx *ctx = req->ctx;
6318 struct file *file;
6319
6320 if (fixed) {
Jens Axboe7b29f922021-03-12 08:30:14 -07006321 unsigned long file_ptr;
6322
Pavel Begunkov479f5172020-10-10 18:34:07 +01006323 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006324 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006325 fd = array_index_nospec(fd, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006326 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
Jens Axboe7b29f922021-03-12 08:30:14 -07006327 file = (struct file *) (file_ptr & FFS_MASK);
6328 file_ptr &= ~FFS_MASK;
6329 /* mask in overlapping REQ_F and FFS bits */
6330 req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01006331 io_req_set_rsrc_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006332 } else {
6333 trace_io_uring_file_get(ctx, fd);
6334 file = __io_file_get(state, fd);
Jens Axboed44f5542021-03-12 08:27:05 -07006335
6336 /* we don't allow fixed io_uring files */
6337 if (file && unlikely(file->f_op == &io_uring_fops))
6338 io_req_track_inflight(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006339 }
6340
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006341 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006342}
6343
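/*
 * Illustrative userspace sketch, not from this file: the fixed-file
 * branch above treats sqe->fd as an index into the registered table, so
 * userspace registers the table once and then marks SQEs with
 * IOSQE_FIXED_FILE. Helper names are made up; error handling is omitted.
 */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int register_files(int ring_fd, int *fds, unsigned int nr)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES, fds, nr);
}

static void prep_fixed_read(struct io_uring_sqe *sqe, unsigned int file_index,
			    void *buf, unsigned int len)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ;
	sqe->fd = file_index;		/* table index, not a raw fd */
	sqe->addr = (unsigned long) buf;
	sqe->len = len;
	sqe->flags = IOSQE_FIXED_FILE;
}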
Jens Axboe2665abf2019-11-05 12:40:47 -07006344static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6345{
Jens Axboead8a48a2019-11-15 08:49:11 -07006346 struct io_timeout_data *data = container_of(timer,
6347 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006348 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006349 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006350 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006351
6352 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006353 prev = req->timeout.head;
6354 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006355
6356 /*
 6357	 * We don't expect the list to be empty; that will only happen if we
6358 * race with the completion of the linked work.
6359 */
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006360 if (prev) {
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006361 io_remove_next_linked(prev);
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006362 if (!req_ref_inc_not_zero(prev))
6363 prev = NULL;
6364 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006365 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6366
6367 if (prev) {
Pavel Begunkov014db002020-03-03 21:33:12 +03006368 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006369 io_put_req_deferred(prev, 1);
Pavel Begunkova2982322021-05-07 21:06:38 +01006370 io_put_req_deferred(req, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006371 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006372 io_req_complete_post(req, -ETIME, 0);
Jens Axboe2665abf2019-11-05 12:40:47 -07006373 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006374 return HRTIMER_NORESTART;
6375}
6376
Pavel Begunkovde968c12021-03-19 17:22:33 +00006377static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006378{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006379 struct io_ring_ctx *ctx = req->ctx;
6380
6381 spin_lock_irq(&ctx->completion_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006382 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006383 * If the back reference is NULL, then our linked request finished
 6384	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006385 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006386 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006387 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006388
Jens Axboead8a48a2019-11-15 08:49:11 -07006389 data->timer.function = io_link_timeout_fn;
6390 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6391 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006392 }
Jens Axboe76a46e02019-11-10 23:34:16 -07006393 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006394 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006395 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006396}
6397
Jens Axboead8a48a2019-11-15 08:49:11 -07006398static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006399{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006400 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006401
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006402 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6403 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006404 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006405
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006406 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006407 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006408 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006409 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006410}
6411
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006412static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006413{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006414 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006415 int ret;
6416
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006417 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006418
6419 /*
6420 * We async punt it if the file wasn't marked NOWAIT, or if the file
6421 * doesn't support non-blocking read/write attempts
6422 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006423 if (likely(!ret)) {
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006424 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006425 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006426 struct io_ring_ctx *ctx = req->ctx;
6427 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006428
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006429 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006430 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006431 io_submit_flush_completions(cs, ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006432 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006433 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006434 }
Pavel Begunkov18400382021-03-19 17:22:34 +00006435 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6436 if (!io_arm_poll_handler(req)) {
6437 /*
6438 * Queued up for async execution, worker will release
6439 * submit reference when the iocb is actually submitted.
6440 */
6441 io_queue_async_work(req);
6442 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006443 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006444 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006445 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006446 if (linked_timeout)
6447 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006448}
6449
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006450static void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006451{
6452 int ret;
6453
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006454 ret = io_req_defer(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006455 if (ret) {
6456 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006457fail_req:
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006458 io_req_complete_failed(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006459 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006460 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006461 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006462 if (unlikely(ret))
6463 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07006464 io_queue_async_work(req);
6465 } else {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006466 __io_queue_sqe(req);
Jens Axboece35a472019-12-17 08:04:44 -07006467 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006468}
6469
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006470/*
6471 * Check SQE restrictions (opcode and flags).
6472 *
6473 * Returns 'true' if SQE is allowed, 'false' otherwise.
6474 */
6475static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6476 struct io_kiocb *req,
6477 unsigned int sqe_flags)
6478{
6479 if (!ctx->restricted)
6480 return true;
6481
6482 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6483 return false;
6484
6485 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6486 ctx->restrictions.sqe_flags_required)
6487 return false;
6488
6489 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6490 ctx->restrictions.sqe_flags_required))
6491 return false;
6492
6493 return true;
6494}
6495
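/*
 * Illustrative userspace sketch, not from this file: registering the
 * rules io_check_restriction() above enforces. This assumes a ring
 * created with IORING_SETUP_R_DISABLED and restrictions registered
 * before the ring is enabled; afterwards only IORING_OP_READV and the
 * listed SQE flags pass the check. Error handling is omitted.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int restrict_ring_to_readv(int ring_fd)
{
	struct io_uring_restriction res[] = {
		{ .opcode = IORING_RESTRICTION_SQE_OP,
		  .sqe_op = IORING_OP_READV },
		{ .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
		  .sqe_flags = IOSQE_IO_LINK | IOSQE_ASYNC },
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_RESTRICTIONS, res, 2);
}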
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006496static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006497 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006498{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006499 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006500 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006501 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006502
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006503 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006504 /* same numerical values with corresponding REQ_F_*, safe to copy */
6505 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006506 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006507 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006508 req->file = NULL;
6509 req->ctx = ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006510 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006511 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006512 /* one is dropped after submission, the other at completion */
Jens Axboeabc54d62021-02-24 13:32:30 -07006513 atomic_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006514 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006515 req->result = 0;
Jens Axboe93e68e02021-03-09 07:02:21 -07006516 req->work.creds = NULL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006517
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006518 /* enforce forwards compatibility on users */
Pavel Begunkovdddca222021-04-27 16:13:52 +01006519 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006520 return -EINVAL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006521 if (unlikely(req->opcode >= IORING_OP_LAST))
6522 return -EINVAL;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006523 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6524 return -EACCES;
6525
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006526 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6527 !io_op_defs[req->opcode].buffer_select)
6528 return -EOPNOTSUPP;
6529
Jens Axboe003e8dc2021-03-06 09:22:27 -07006530 personality = READ_ONCE(sqe->personality);
6531 if (personality) {
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00006532 req->work.creds = xa_load(&ctx->personalities, personality);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006533 if (!req->work.creds)
6534 return -EINVAL;
6535 get_cred(req->work.creds);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006536 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006537 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006538
Jens Axboe27926b62020-10-28 09:33:23 -06006539 /*
6540 * Plug now if we have more than 1 IO left after this, and the target
6541 * is potentially a read/write to block based storage.
6542 */
6543 if (!state->plug_started && state->ios_left > 1 &&
6544 io_op_defs[req->opcode].plug) {
6545 blk_start_plug(&state->plug);
6546 state->plug_started = true;
6547 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006548
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006549 if (io_op_defs[req->opcode].needs_file) {
6550 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006551
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006552 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006553 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006554 ret = -EBADF;
6555 }
6556
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006557 state->ios_left--;
6558 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006559}
6560
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006561static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006562 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006563{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006564 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006565 int ret;
6566
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006567 ret = io_init_req(ctx, req, sqe);
6568 if (unlikely(ret)) {
6569fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006570 if (link->head) {
6571 /* fail even hard links since we don't submit */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006572 req_set_fail(link->head);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006573 io_req_complete_failed(link->head, -ECANCELED);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006574 link->head = NULL;
6575 }
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006576 io_req_complete_failed(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006577 return ret;
6578 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006579 ret = io_req_prep(req, sqe);
6580 if (unlikely(ret))
6581 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006582
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006583 /* don't need @sqe from now on */
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006584 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6585 true, ctx->flags & IORING_SETUP_SQPOLL);
6586
Jens Axboe6c271ce2019-01-10 11:22:30 -07006587 /*
6588 * If we already have a head request, queue this one for async
6589 * submittal once the head completes. If we don't have a head but
6590 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6591 * submitted sync once the chain is complete. If none of those
6592 * conditions are true (normal request), then just queue it.
6593 */
6594 if (link->head) {
6595 struct io_kiocb *head = link->head;
6596
6597 /*
6598 * Taking sequential execution of a link, draining both sides
 6599	 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6600 * requests in the link. So, it drains the head and the
 6601	 * next request after the link. The latter is done via the
 6602	 * drain_next flag to persist the effect across calls.
6603 */
6604 if (req->flags & REQ_F_IO_DRAIN) {
6605 head->flags |= REQ_F_IO_DRAIN;
6606 ctx->drain_next = 1;
6607 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006608 ret = io_req_prep_async(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006609 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006610 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006611 trace_io_uring_link(ctx, req, head);
6612 link->last->link = req;
6613 link->last = req;
6614
6615 /* last request of a link, enqueue the link */
6616 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006617 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006618 link->head = NULL;
6619 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006620 } else {
6621 if (unlikely(ctx->drain_next)) {
6622 req->flags |= REQ_F_IO_DRAIN;
6623 ctx->drain_next = 0;
6624 }
6625 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006626 link->head = req;
6627 link->last = req;
6628 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006629 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006630 }
6631 }
6632
6633 return 0;
6634}
6635
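/*
 * Illustrative userspace sketch, not from this file: the flags that
 * drive the link handling in io_submit_sqe() above. IOSQE_IO_LINK on the
 * write makes the fsync wait for it; setting IOSQE_IO_DRAIN on either
 * SQE would additionally drain all previously submitted requests, as the
 * comment above describes. Offsets and the helper name are simplified.
 */
#include <string.h>
#include <linux/io_uring.h>

static void prep_linked_write_fsync(struct io_uring_sqe *sqe_write,
				    struct io_uring_sqe *sqe_fsync,
				    int fd, const void *buf, unsigned int len)
{
	memset(sqe_write, 0, sizeof(*sqe_write));
	sqe_write->opcode = IORING_OP_WRITE;
	sqe_write->fd = fd;
	sqe_write->addr = (unsigned long) buf;
	sqe_write->len = len;		/* writes at offset 0, off is zeroed */
	sqe_write->flags = IOSQE_IO_LINK;

	memset(sqe_fsync, 0, sizeof(*sqe_fsync));
	sqe_fsync->opcode = IORING_OP_FSYNC;
	sqe_fsync->fd = fd;
}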
6636/*
 6637 * Batched submission is done; ensure local IO is flushed out.
6638 */
6639static void io_submit_state_end(struct io_submit_state *state,
6640 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006641{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006642 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006643 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006644 if (state->comp.nr)
Jens Axboe9e645e112019-05-10 16:07:28 -06006645 io_submit_flush_completions(&state->comp, ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006646 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006647 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006648 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006649}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006650
Jens Axboe9e645e112019-05-10 16:07:28 -06006651/*
6652 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006653 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006654static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006655 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006656{
6657 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006658 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006659 /* set only head, no need to init link_last in advance */
6660 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006661}
6662
Jens Axboe193155c2020-02-22 23:22:19 -07006663static void io_commit_sqring(struct io_ring_ctx *ctx)
6664{
Jens Axboe75c6a032020-01-28 10:15:23 -07006665 struct io_rings *rings = ctx->rings;
6666
6667 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006668 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006669 * since once we write the new head, the application could
6670 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006671 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006672 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006673}
6674
Jens Axboe9e645e112019-05-10 16:07:28 -06006675/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006676 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006677 * that is mapped by userspace. This means that care needs to be taken to
6678 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006679 * being a good citizen. If members of the sqe are validated and then later
6680 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006681 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006682 */
6683static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006684{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01006685 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006686 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06006687
6688 /*
6689 * The cached sq head (or cq tail) serves two purposes:
6690 *
 6691	 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006692	 *    head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006693 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006694 * though the application is the one updating it.
6695 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006696 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006697 if (likely(head < ctx->sq_entries))
6698 return &ctx->sq_sqes[head];
6699
6700 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01006701 ctx->cq_extra--;
6702 WRITE_ONCE(ctx->rings->sq_dropped,
6703 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03006704 return NULL;
6705}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006706
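/*
 * Illustrative userspace sketch, not from this file: the producer side
 * of the indirection io_get_sqe() consumes. "struct sq_view" is a
 * made-up wrapper around pointers obtained from the SQ ring mmap via
 * io_uring_params.sq_off; overflow checking against the ring head is
 * omitted. The array slot written here is what
 * READ_ONCE(ctx->sq_array[sq_idx]) above picks up.
 */
#include <linux/io_uring.h>

struct sq_view {
	unsigned int *ktail;		/* ring base + sq_off.tail */
	unsigned int *kring_mask;	/* ring base + sq_off.ring_mask */
	unsigned int *karray;		/* ring base + sq_off.array */
	struct io_uring_sqe *sqes;	/* IORING_OFF_SQES mapping */
};

static struct io_uring_sqe *sq_next_sqe(struct sq_view *sq)
{
	unsigned int idx = *sq->ktail & *sq->kring_mask;

	sq->karray[idx] = idx;		/* identity mapping into sqes[] */
	return &sq->sqes[idx];
}

static void sq_publish_one(struct sq_view *sq)
{
	/* publish the new tail; ordering rules are in the file-head comment */
	__atomic_store_n(sq->ktail, *sq->ktail + 1, __ATOMIC_RELEASE);
}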
Jens Axboe0f212202020-09-13 13:09:39 -06006707static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006708{
Pavel Begunkov09899b12021-06-14 02:36:22 +01006709 struct io_uring_task *tctx;
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006710 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006711
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006712 /* make sure SQ entry isn't read before tail */
6713 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006714 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6715 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006716
Pavel Begunkov09899b12021-06-14 02:36:22 +01006717 tctx = current->io_uring;
6718 tctx->cached_refs -= nr;
6719 if (unlikely(tctx->cached_refs < 0)) {
6720 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
6721
6722 percpu_counter_add(&tctx->inflight, refill);
6723 refcount_add(refill, &current->usage);
6724 tctx->cached_refs += refill;
6725 }
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006726 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006727
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006728 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006729 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006730 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006731
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006732 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006733 if (unlikely(!req)) {
6734 if (!submitted)
6735 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006736 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006737 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006738 sqe = io_get_sqe(ctx);
6739 if (unlikely(!sqe)) {
6740 kmem_cache_free(req_cachep, req);
6741 break;
6742 }
Jens Axboed3656342019-12-18 09:50:26 -07006743 /* will complete beyond this point, count as submitted */
6744 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006745 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006746 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006747 }
6748
Pavel Begunkov9466f432020-01-25 22:34:01 +03006749 if (unlikely(submitted != nr)) {
6750 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006751 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006752
Pavel Begunkov09899b12021-06-14 02:36:22 +01006753 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06006754 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006755 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006756
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006757 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006758 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6759 io_commit_sqring(ctx);
6760
Jens Axboe6c271ce2019-01-10 11:22:30 -07006761 return submitted;
6762}
6763
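/*
 * Illustrative userspace sketch, not from this file: the syscall that
 * reaches io_submit_sqes() on a non-SQPOLL ring. to_submit is the number
 * of new tail entries; IORING_ENTER_GETEVENTS additionally waits for
 * min_complete completions. Error handling is omitted.
 */
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int submit_and_wait(int ring_fd, unsigned int to_submit,
			   unsigned int min_complete)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
		       min_complete ? IORING_ENTER_GETEVENTS : 0, NULL, 0);
}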
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006764static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
6765{
6766 return READ_ONCE(sqd->state);
6767}
6768
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006769static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6770{
6771 /* Tell userspace we may need a wakeup call */
6772 spin_lock_irq(&ctx->completion_lock);
6773 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6774 spin_unlock_irq(&ctx->completion_lock);
6775}
6776
6777static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6778{
6779 spin_lock_irq(&ctx->completion_lock);
6780 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6781 spin_unlock_irq(&ctx->completion_lock);
6782}
6783
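/*
 * Illustrative userspace sketch, not from this file: how a SQPOLL
 * submitter reacts to the flag toggled by the two helpers above. Once
 * the SQ thread has gone idle and set IORING_SQ_NEED_WAKEUP, the
 * application calls io_uring_enter() with IORING_ENTER_SQ_WAKEUP;
 * otherwise no syscall is needed. "sq_flags" is assumed to point at the
 * flags word in the mmap()ed SQ ring.
 */
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static void sqpoll_kick_if_needed(int ring_fd, const unsigned int *sq_flags)
{
	if (*(volatile const unsigned int *) sq_flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}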
Xiaoguang Wang08369242020-11-03 14:15:59 +08006784static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006785{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006786 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006787 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006788
Jens Axboec8d1ba52020-09-14 11:07:26 -06006789 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006790 /* if we're handling multiple rings, cap submit size for fairness */
6791 if (cap_entries && to_submit > 8)
6792 to_submit = 8;
6793
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006794 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6795 unsigned nr_events = 0;
6796
Xiaoguang Wang08369242020-11-03 14:15:59 +08006797 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006798 if (!list_empty(&ctx->iopoll_list))
6799 io_do_iopoll(ctx, &nr_events, 0);
6800
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01006801 /*
6802 * Don't submit if refs are dying, good for io_uring_register(),
6803 * but also it is relied upon by io_ring_exit_work()
6804 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006805 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6806 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006807 ret = io_submit_sqes(ctx, to_submit);
6808 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06006809
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006810 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
6811 wake_up(&ctx->sqo_sq_wait);
6812 }
Jens Axboe90554202020-09-03 12:12:41 -06006813
Xiaoguang Wang08369242020-11-03 14:15:59 +08006814 return ret;
6815}
6816
6817static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6818{
6819 struct io_ring_ctx *ctx;
6820 unsigned sq_thread_idle = 0;
6821
Pavel Begunkovc9dca272021-03-10 13:13:55 +00006822 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6823 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006824 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006825}
6826
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006827static bool io_sqd_handle_event(struct io_sq_data *sqd)
6828{
6829 bool did_sig = false;
6830 struct ksignal ksig;
6831
6832 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6833 signal_pending(current)) {
6834 mutex_unlock(&sqd->lock);
6835 if (signal_pending(current))
6836 did_sig = get_signal(&ksig);
6837 cond_resched();
6838 mutex_lock(&sqd->lock);
6839 }
6840 io_run_task_work();
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006841 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6842}
6843
Jens Axboe6c271ce2019-01-10 11:22:30 -07006844static int io_sq_thread(void *data)
6845{
Jens Axboe69fb2132020-09-14 11:16:23 -06006846 struct io_sq_data *sqd = data;
6847 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006848 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006849 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006850 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006851
Pavel Begunkov696ee882021-04-01 09:55:04 +01006852 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006853 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06006854
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006855 if (sqd->sq_cpu != -1)
6856 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6857 else
6858 set_cpus_allowed_ptr(current, cpu_online_mask);
6859 current->flags |= PF_NO_SETAFFINITY;
6860
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006861 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006862 while (1) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006863 int ret;
6864 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006865
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006866 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
6867 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01006868 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006869 timeout = jiffies + sqd->sq_thread_idle;
Pavel Begunkov7d41e852021-03-10 13:13:54 +00006870 continue;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006871 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006872
Xiaoguang Wang08369242020-11-03 14:15:59 +08006873 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06006874 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006875 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006876 const struct cred *creds = NULL;
6877
6878 if (ctx->sq_creds != current_cred())
6879 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006880 ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006881 if (creds)
6882 revert_creds(creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006883 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6884 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006885 }
6886
Xiaoguang Wang08369242020-11-03 14:15:59 +08006887 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006888 io_run_task_work();
6889 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006890 if (sqt_spin)
6891 timeout = jiffies + sqd->sq_thread_idle;
6892 continue;
6893 }
6894
Xiaoguang Wang08369242020-11-03 14:15:59 +08006895 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006896 if (!io_sqd_events_pending(sqd)) {
Hao Xu724cb4f2021-04-21 23:19:11 +08006897 needs_sched = true;
6898 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01006899 io_ring_set_wakeup_flag(ctx);
6900
Hao Xu724cb4f2021-04-21 23:19:11 +08006901 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6902 !list_empty_careful(&ctx->iopoll_list)) {
6903 needs_sched = false;
6904 break;
6905 }
6906 if (io_sqring_entries(ctx)) {
6907 needs_sched = false;
6908 break;
6909 }
6910 }
6911
6912 if (needs_sched) {
6913 mutex_unlock(&sqd->lock);
6914 schedule();
6915 mutex_lock(&sqd->lock);
6916 }
Jens Axboe69fb2132020-09-14 11:16:23 -06006917 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6918 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006919 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006920
6921 finish_wait(&sqd->wait, &wait);
6922 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006923 }
6924
Pavel Begunkov78cc6872021-06-14 02:36:23 +01006925 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006926 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006927 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006928 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006929 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01006930 mutex_unlock(&sqd->lock);
6931
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006932 complete(&sqd->exited);
6933 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006934}
6935
Jens Axboebda52162019-09-24 13:47:15 -06006936struct io_wait_queue {
6937 struct wait_queue_entry wq;
6938 struct io_ring_ctx *ctx;
6939 unsigned to_wait;
6940 unsigned nr_timeouts;
6941};
6942
Pavel Begunkov6c503152021-01-04 20:36:36 +00006943static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006944{
6945 struct io_ring_ctx *ctx = iowq->ctx;
6946
6947 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006948 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006949 * started waiting. For timeouts, we always want to return to userspace,
6950 * regardless of event count.
6951 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006952 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006953 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6954}
6955
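/*
 * Wait-queue callback for CQ waiters: only wake the task when enough
 * completions have arrived, a timeout has fired, or overflowed CQEs need
 * flushing; otherwise keep waiting.
 */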
6956static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6957 int wake_flags, void *key)
6958{
6959 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6960 wq);
6961
Pavel Begunkov6c503152021-01-04 20:36:36 +00006962 /*
6963	 * We cannot safely flush overflowed CQEs from here, so make sure the
6964	 * task is woken up; the next invocation will flush them.
6965 */
6966 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6967 return autoremove_wake_function(curr, mode, wake_flags, key);
6968 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006969}
6970
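/*
 * Run pending task_work, if any.  Returns 1 if work was run, 0 if there was
 * nothing to do, and -ERESTARTSYS/-EINTR if a signal is pending instead.
 */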
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006971static int io_run_task_work_sig(void)
6972{
6973 if (io_run_task_work())
6974 return 1;
6975 if (!signal_pending(current))
6976 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06006977 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06006978 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006979 return -EINTR;
6980}
6981
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006982/* when this returns >0, the caller should retry */
6983static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6984 struct io_wait_queue *iowq,
6985 signed long *timeout)
6986{
6987 int ret;
6988
6989 /* make sure we run task_work before checking for signals */
6990 ret = io_run_task_work_sig();
6991 if (ret || io_should_wake(iowq))
6992 return ret;
6993 /* let the caller flush overflows, retry */
6994 if (test_bit(0, &ctx->cq_check_overflow))
6995 return 1;
6996
6997 *timeout = schedule_timeout(*timeout);
6998 return !*timeout ? -ETIME : 1;
6999}
7000
Jens Axboe2b188cc2019-01-07 10:46:33 -07007001/*
7002 * Wait until events become available, if we don't already have some. The
7003 * application must reap them itself, as they reside on the shared cq ring.
7004 */
7005static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007006 const sigset_t __user *sig, size_t sigsz,
7007 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007008{
Jens Axboebda52162019-09-24 13:47:15 -06007009 struct io_wait_queue iowq = {
7010 .wq = {
7011 .private = current,
7012 .func = io_wake_function,
7013 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7014 },
7015 .ctx = ctx,
7016 .to_wait = min_events,
7017 };
Hristo Venev75b28af2019-08-26 17:23:46 +00007018 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007019 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7020 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007021
Jens Axboeb41e9852020-02-17 09:52:41 -07007022 do {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00007023 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007024 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007025 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007026 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007027 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007028 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007029
7030 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007031#ifdef CONFIG_COMPAT
7032 if (in_compat_syscall())
7033 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007034 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007035 else
7036#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007037 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007038
Jens Axboe2b188cc2019-01-07 10:46:33 -07007039 if (ret)
7040 return ret;
7041 }
7042
Hao Xuc73ebb62020-11-03 10:54:37 +08007043 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007044 struct timespec64 ts;
7045
Hao Xuc73ebb62020-11-03 10:54:37 +08007046 if (get_timespec64(&ts, uts))
7047 return -EFAULT;
7048 timeout = timespec64_to_jiffies(&ts);
7049 }
7050
Jens Axboebda52162019-09-24 13:47:15 -06007051 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007052 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007053 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007054 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00007055 if (!io_cqring_overflow_flush(ctx, false)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007056 ret = -EBUSY;
7057 break;
7058 }
Jens Axboebda52162019-09-24 13:47:15 -06007059 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7060 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007061 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7062 finish_wait(&ctx->wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007063 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007064 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007065
Jens Axboeb7db41c2020-07-04 08:55:50 -06007066 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007067
Hristo Venev75b28af2019-08-26 17:23:46 +00007068 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007069}
7070
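/*
 * Helpers for "page table" style allocations: a large, logically contiguous
 * table is split into page-sized chunks hung off a top-level pointer array,
 * avoiding high-order allocations.  For example, assuming 4K pages, a
 * 9000-byte table becomes DIV_ROUND_UP(9000, 4096) = 3 chunks of
 * 4096/4096/808 bytes.
 */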
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007071static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007072{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007073 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007074
7075 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007076 kfree(table[i]);
7077 kfree(table);
7078}
7079
7080static void **io_alloc_page_table(size_t size)
7081{
7082 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7083 size_t init_size = size;
7084 void **table;
7085
7086 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
7087 if (!table)
7088 return NULL;
7089
7090 for (i = 0; i < nr_tables; i++) {
7091 unsigned int this_size = min(size, PAGE_SIZE);
7092
7093 table[i] = kzalloc(this_size, GFP_KERNEL);
7094 if (!table[i]) {
7095 io_free_page_table(table, init_size);
7096 return NULL;
7097 }
7098 size -= this_size;
7099 }
7100 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007101}
7102
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007103static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007104{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007105 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00007106}
7107
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007108static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07007109{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007110 spin_unlock_bh(&ctx->rsrc_ref_lock);
7111}
7112
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007113static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7114{
7115 percpu_ref_exit(&ref_node->refs);
7116 kfree(ref_node);
7117}
7118
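/*
 * Retire the active rsrc node: if @data_to_kill is given, queue the current
 * node on rsrc_ref_list and kill its percpu ref so the queued puts run once
 * every request using it is done, then promote the preallocated backup node
 * to be the new active one.  Callers must have done io_rsrc_node_switch_start().
 */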
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007119static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7120 struct io_rsrc_data *data_to_kill)
Jens Axboe6b063142019-01-10 22:13:58 -07007121{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007122 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7123 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007124
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007125 if (data_to_kill) {
7126 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007127
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007128 rsrc_node->rsrc_data = data_to_kill;
7129 io_rsrc_ref_lock(ctx);
7130 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
7131 io_rsrc_ref_unlock(ctx);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007132
Pavel Begunkov3e942492021-04-11 01:46:34 +01007133 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007134 percpu_ref_kill(&rsrc_node->refs);
7135 ctx->rsrc_node = NULL;
7136 }
7137
7138 if (!ctx->rsrc_node) {
7139 ctx->rsrc_node = ctx->rsrc_backup_node;
7140 ctx->rsrc_backup_node = NULL;
7141 }
Jens Axboe6b063142019-01-10 22:13:58 -07007142}
7143
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007144static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007145{
7146 if (ctx->rsrc_backup_node)
7147 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007148 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007149 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7150}
7151
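/*
 * Quiesce @data before unregistration: switch to a fresh rsrc node, drop the
 * initial reference and wait for all pending puts to complete.  ->uring_lock
 * is dropped while running task_work, and a pending signal aborts the wait.
 */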
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007152static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007153{
7154 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007155
Pavel Begunkov215c3902021-04-01 15:43:48 +01007156	/* As we may drop ->uring_lock, another task may have started quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007157 if (data->quiesce)
7158 return -ENXIO;
7159
7160 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007161 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007162 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007163 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007164 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007165 io_rsrc_node_switch(ctx, data);
7166
Pavel Begunkov3e942492021-04-11 01:46:34 +01007167 /* kill initial ref, already quiesced if zero */
7168 if (atomic_dec_and_test(&data->refs))
7169 break;
Hao Xu8bad28d2021-02-19 17:19:36 +08007170 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007171 ret = wait_for_completion_interruptible(&data->done);
7172 if (!ret)
7173 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007174
Pavel Begunkov3e942492021-04-11 01:46:34 +01007175 atomic_inc(&data->refs);
7176	 /* wait for all work items that might complete data->done */
7177 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007178 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007179
Hao Xu8bad28d2021-02-19 17:19:36 +08007180 mutex_unlock(&ctx->uring_lock);
7181 ret = io_run_task_work_sig();
7182 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007183 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007184 data->quiesce = false;
7185
Hao Xu8bad28d2021-02-19 17:19:36 +08007186 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007187}
7188
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007189static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7190{
7191 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7192 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7193
7194 return &data->tags[table_idx][off];
7195}
7196
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007197static void io_rsrc_data_free(struct io_rsrc_data *data)
7198{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007199 size_t size = data->nr * sizeof(data->tags[0][0]);
7200
7201 if (data->tags)
7202 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007203 kfree(data);
7204}
7205
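/*
 * Allocate an io_rsrc_data for @nr resources and copy in the optional
 * per-resource user tags, which are later posted in a CQE when a tagged
 * resource is released.  Returns 0 and sets *pdata on success.
 */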
Pavel Begunkovd878c812021-06-14 02:36:18 +01007206static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7207 u64 __user *utags, unsigned nr,
7208 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007209{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007210 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007211 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007212 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007213
7214 data = kzalloc(sizeof(*data), GFP_KERNEL);
7215 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007216 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007217 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007218 if (!data->tags) {
7219 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007220 return -ENOMEM;
7221 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007222
7223 data->nr = nr;
7224 data->ctx = ctx;
7225 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007226 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007227 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007228 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01007229 u64 *tag_slot = io_get_tag_slot(data, i);
7230
7231 if (copy_from_user(tag_slot, &utags[i],
7232 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007233 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007234 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007235 }
7236
Pavel Begunkov3e942492021-04-11 01:46:34 +01007237 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007238 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007239 *pdata = data;
7240 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007241fail:
7242 io_rsrc_data_free(data);
7243 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007244}
7245
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007246static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7247{
7248 size_t size = nr_files * sizeof(struct io_fixed_file);
7249
7250 table->files = (struct io_fixed_file **)io_alloc_page_table(size);
7251 return !!table->files;
7252}
7253
7254static void io_free_file_tables(struct io_file_table *table, unsigned nr_files)
7255{
7256 size_t size = nr_files * sizeof(struct io_fixed_file);
7257
7258 io_free_page_table((void **)table->files, size);
7259 table->files = NULL;
7260}
7261
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02007262static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7263{
Jens Axboe06058632019-04-13 09:26:03 -06007264#if defined(CONFIG_UNIX)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007265 if (ctx->ring_sock) {
7266 struct sock *sock = ctx->ring_sock->sk;
7267 struct sk_buff *skb;
7268
7269 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
Jens Axboe6b063142019-01-10 22:13:58 -07007270 kfree_skb(skb);
7271 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007272#else
7273 int i;
Jens Axboe6b063142019-01-10 22:13:58 -07007274
7275 for (i = 0; i < ctx->nr_user_files; i++) {
7276 struct file *file;
7277
7278 file = io_file_from_index(ctx, i);
7279 if (file)
7280 fput(file);
7281 }
7282#endif
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007283 io_free_file_tables(&ctx->file_table, ctx->nr_user_files);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007284 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007285 ctx->file_data = NULL;
7286 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007287}
7288
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007289static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7290{
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007291 int ret;
7292
Pavel Begunkov08480402021-04-13 02:58:38 +01007293 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007294 return -ENXIO;
Pavel Begunkov08480402021-04-13 02:58:38 +01007295 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7296 if (!ret)
7297 __io_sqe_files_unregister(ctx);
7298 return ret;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007299}
7300
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007301static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007302 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007303{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007304 WARN_ON_ONCE(sqd->thread == current);
7305
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007306 /*
7307	 * Do the full dance rather than a conditional clear_bit(), because that
7308	 * would race with other threads incrementing park_pending and setting the bit.
7309 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007310 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007311 if (atomic_dec_return(&sqd->park_pending))
7312 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007313 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007314}
7315
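/*
 * Park the SQPOLL thread: bump park_pending, set SHOULD_PARK, then grab
 * sqd->lock and kick the thread so the caller can work on the io_sq_data
 * with the thread held off.  Paired with io_sq_thread_unpark().
 */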
Jens Axboe86e0d672021-03-05 08:44:39 -07007316static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007317 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007318{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007319 WARN_ON_ONCE(sqd->thread == current);
7320
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007321 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007322 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007323 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007324 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007325 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007326}
7327
7328static void io_sq_thread_stop(struct io_sq_data *sqd)
7329{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007330 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007331 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007332
Jens Axboe05962f92021-03-06 13:58:48 -07007333 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007334 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07007335 if (sqd->thread)
7336 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007337 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007338 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007339}
7340
Jens Axboe534ca6d2020-09-02 13:52:19 -06007341static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007342{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007343 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007344 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7345
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007346 io_sq_thread_stop(sqd);
7347 kfree(sqd);
7348 }
7349}
7350
7351static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7352{
7353 struct io_sq_data *sqd = ctx->sq_data;
7354
7355 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007356 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007357 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007358 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007359 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007360
7361 io_put_sq_data(sqd);
7362 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007363 }
7364}
7365
Jens Axboeaa061652020-09-02 14:50:27 -06007366static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7367{
7368 struct io_ring_ctx *ctx_attach;
7369 struct io_sq_data *sqd;
7370 struct fd f;
7371
7372 f = fdget(p->wq_fd);
7373 if (!f.file)
7374 return ERR_PTR(-ENXIO);
7375 if (f.file->f_op != &io_uring_fops) {
7376 fdput(f);
7377 return ERR_PTR(-EINVAL);
7378 }
7379
7380 ctx_attach = f.file->private_data;
7381 sqd = ctx_attach->sq_data;
7382 if (!sqd) {
7383 fdput(f);
7384 return ERR_PTR(-EINVAL);
7385 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007386 if (sqd->task_tgid != current->tgid) {
7387 fdput(f);
7388 return ERR_PTR(-EPERM);
7389 }
Jens Axboeaa061652020-09-02 14:50:27 -06007390
7391 refcount_inc(&sqd->refs);
7392 fdput(f);
7393 return sqd;
7394}
7395
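/*
 * Get the io_sq_data for a new ring: with IORING_SETUP_ATTACH_WQ, try to
 * share the SQPOLL backend of the ring referred to by p->wq_fd (setting
 * *attached on success); otherwise allocate and initialise a fresh one.
 */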
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007396static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7397 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007398{
7399 struct io_sq_data *sqd;
7400
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007401 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007402 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7403 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007404 if (!IS_ERR(sqd)) {
7405 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007406 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007407 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007408 /* fall through for EPERM case, setup new sqd/task */
7409 if (PTR_ERR(sqd) != -EPERM)
7410 return sqd;
7411 }
Jens Axboeaa061652020-09-02 14:50:27 -06007412
Jens Axboe534ca6d2020-09-02 13:52:19 -06007413 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7414 if (!sqd)
7415 return ERR_PTR(-ENOMEM);
7416
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007417 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007418 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007419 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007420 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007421 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007422 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007423 return sqd;
7424}
7425
Jens Axboe6b063142019-01-10 22:13:58 -07007426#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007427/*
7428 * Ensure the UNIX gc is aware of our file set, so we are certain that
7429 * the io_uring can be safely unregistered on process exit, even if we have
7430 * reference loops among the files.
7431 */
7432static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7433{
7434 struct sock *sk = ctx->ring_sock->sk;
7435 struct scm_fp_list *fpl;
7436 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007437 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007438
Jens Axboe6b063142019-01-10 22:13:58 -07007439 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7440 if (!fpl)
7441 return -ENOMEM;
7442
7443 skb = alloc_skb(0, GFP_KERNEL);
7444 if (!skb) {
7445 kfree(fpl);
7446 return -ENOMEM;
7447 }
7448
7449 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007450
Jens Axboe08a45172019-10-03 08:11:03 -06007451 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007452 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007453 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007454 struct file *file = io_file_from_index(ctx, i + offset);
7455
7456 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007457 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007458 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007459 unix_inflight(fpl->user, fpl->fp[nr_files]);
7460 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007461 }
7462
Jens Axboe08a45172019-10-03 08:11:03 -06007463 if (nr_files) {
7464 fpl->max = SCM_MAX_FD;
7465 fpl->count = nr_files;
7466 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007467 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007468 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7469 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007470
Jens Axboe08a45172019-10-03 08:11:03 -06007471 for (i = 0; i < nr_files; i++)
7472 fput(fpl->fp[i]);
7473 } else {
7474 kfree_skb(skb);
7475 kfree(fpl);
7476 }
Jens Axboe6b063142019-01-10 22:13:58 -07007477
7478 return 0;
7479}
7480
7481/*
7482 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7483 * causes regular reference counting to break down. We rely on the UNIX
7484 * garbage collection to take care of this problem for us.
7485 */
7486static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7487{
7488 unsigned left, total;
7489 int ret = 0;
7490
7491 total = 0;
7492 left = ctx->nr_user_files;
7493 while (left) {
7494 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
7495
7496 ret = __io_sqe_files_scm(ctx, this_files, total);
7497 if (ret)
7498 break;
7499 left -= this_files;
7500 total += this_files;
7501 }
7502
7503 if (!ret)
7504 return 0;
7505
7506 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007507 struct file *file = io_file_from_index(ctx, total);
7508
7509 if (file)
7510 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007511 total++;
7512 }
7513
7514 return ret;
7515}
7516#else
7517static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7518{
7519 return 0;
7520}
7521#endif
7522
Pavel Begunkov47e90392021-04-01 15:43:56 +01007523static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007524{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007525 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007526#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007527 struct sock *sock = ctx->ring_sock->sk;
7528 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7529 struct sk_buff *skb;
7530 int i;
7531
7532 __skb_queue_head_init(&list);
7533
7534 /*
7535 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7536 * remove this entry and rearrange the file array.
7537 */
7538 skb = skb_dequeue(head);
7539 while (skb) {
7540 struct scm_fp_list *fp;
7541
7542 fp = UNIXCB(skb).fp;
7543 for (i = 0; i < fp->count; i++) {
7544 int left;
7545
7546 if (fp->fp[i] != file)
7547 continue;
7548
7549 unix_notinflight(fp->user, fp->fp[i]);
7550 left = fp->count - 1 - i;
7551 if (left) {
7552 memmove(&fp->fp[i], &fp->fp[i + 1],
7553 left * sizeof(struct file *));
7554 }
7555 fp->count--;
7556 if (!fp->count) {
7557 kfree_skb(skb);
7558 skb = NULL;
7559 } else {
7560 __skb_queue_tail(&list, skb);
7561 }
7562 fput(file);
7563 file = NULL;
7564 break;
7565 }
7566
7567 if (!file)
7568 break;
7569
7570 __skb_queue_tail(&list, skb);
7571
7572 skb = skb_dequeue(head);
7573 }
7574
7575 if (skb_peek(&list)) {
7576 spin_lock_irq(&head->lock);
7577 while ((skb = __skb_dequeue(&list)) != NULL)
7578 __skb_queue_tail(head, skb);
7579 spin_unlock_irq(&head->lock);
7580 }
7581#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007582 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007583#endif
7584}
7585
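/*
 * Runs once an rsrc node's refs hit zero: release every resource queued on
 * the node (posting a CQE with the user's tag for tagged entries), destroy
 * the node, and complete data->done when the last data reference is gone.
 */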
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007586static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007587{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007588 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007589 struct io_ring_ctx *ctx = rsrc_data->ctx;
7590 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007591
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007592 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7593 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007594
7595 if (prsrc->tag) {
7596 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007597
7598 io_ring_submit_lock(ctx, lock_ring);
Pavel Begunkov157d2572021-06-14 02:36:19 +01007599 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007600 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
Pavel Begunkov2840f712021-04-27 16:13:51 +01007601 ctx->cq_extra++;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007602 io_commit_cqring(ctx);
Pavel Begunkov157d2572021-06-14 02:36:19 +01007603 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007604 io_cqring_ev_posted(ctx);
7605 io_ring_submit_unlock(ctx, lock_ring);
7606 }
7607
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007608 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007609 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007610 }
7611
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007612 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007613 if (atomic_dec_and_test(&rsrc_data->refs))
7614 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007615}
7616
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007617static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007618{
7619 struct io_ring_ctx *ctx;
7620 struct llist_node *node;
7621
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007622 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7623 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007624
7625 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007626 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007627 struct llist_node *next = node->next;
7628
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007629 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007630 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007631 node = next;
7632 }
7633}
7634
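/*
 * percpu_ref release callback for an rsrc node.  Nodes must be recycled in
 * the order they were switched out, so mark this one done, move any leading
 * completed nodes onto rsrc_put_llist and schedule rsrc_put_work.
 */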
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007635static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007636{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007637 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007638 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007639 bool first_add = false;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007640
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007641 io_rsrc_ref_lock(ctx);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007642 node->done = true;
Pavel Begunkove2978222020-11-18 14:56:26 +00007643
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007644 while (!list_empty(&ctx->rsrc_ref_list)) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007645 node = list_first_entry(&ctx->rsrc_ref_list,
7646 struct io_rsrc_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007647 /* recycle ref nodes in order */
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007648 if (!node->done)
Pavel Begunkove2978222020-11-18 14:56:26 +00007649 break;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007650 list_del(&node->node);
7651 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007652 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007653 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007654
Pavel Begunkov3e942492021-04-11 01:46:34 +01007655 if (first_add)
7656 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007657}
7658
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007659static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007660{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007661 struct io_rsrc_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007662
7663 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7664 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007665 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007666
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007667 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007668 0, GFP_KERNEL)) {
7669 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007670 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007671 }
7672 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007673 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007674 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007675 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007676}
7677
Jens Axboe05f3fb32019-12-09 11:22:50 -07007678static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01007679 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007680{
7681 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007682 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007683 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007684 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007685
7686 if (ctx->file_data)
7687 return -EBUSY;
7688 if (!nr_args)
7689 return -EINVAL;
7690 if (nr_args > IORING_MAX_FIXED_FILES)
7691 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007692 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007693 if (ret)
7694 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007695 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
7696 &ctx->file_data);
7697 if (ret)
7698 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007699
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007700 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007701 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007702 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007703
Jens Axboe05f3fb32019-12-09 11:22:50 -07007704 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01007705 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007706 ret = -EFAULT;
7707 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007708 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007709 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01007710 if (fd == -1) {
7711 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007712 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01007713 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007714 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007715 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007716
Jens Axboe05f3fb32019-12-09 11:22:50 -07007717 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007718 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007719 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007720 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007721
7722 /*
7723 * Don't allow io_uring instances to be registered. If UNIX
7724 * isn't enabled, then this causes a reference cycle and this
7725 * instance can never get freed. If UNIX is enabled we'll
7726 * handle it just fine, but there's still no point in allowing
7727 * a ring fd as it doesn't support regular read/write anyway.
7728 */
7729 if (file->f_op == &io_uring_fops) {
7730 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007731 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007732 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007733 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007734 }
7735
Jens Axboe05f3fb32019-12-09 11:22:50 -07007736 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007737 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01007738 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007739 return ret;
7740 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007741
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007742 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007743 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007744out_fput:
7745 for (i = 0; i < ctx->nr_user_files; i++) {
7746 file = io_file_from_index(ctx, i);
7747 if (file)
7748 fput(file);
7749 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007750 io_free_file_tables(&ctx->file_table, nr_args);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007751 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007752out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007753 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007754 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007755 return ret;
7756}
7757
Jens Axboec3a31e62019-10-03 13:59:56 -06007758static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7759 int index)
7760{
7761#if defined(CONFIG_UNIX)
7762 struct sock *sock = ctx->ring_sock->sk;
7763 struct sk_buff_head *head = &sock->sk_receive_queue;
7764 struct sk_buff *skb;
7765
7766 /*
7767 * See if we can merge this file into an existing skb SCM_RIGHTS
7768 * file set. If there's no room, fall back to allocating a new skb
7769 * and filling it in.
7770 */
7771 spin_lock_irq(&head->lock);
7772 skb = skb_peek(head);
7773 if (skb) {
7774 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7775
7776 if (fpl->count < SCM_MAX_FD) {
7777 __skb_unlink(skb, head);
7778 spin_unlock_irq(&head->lock);
7779 fpl->fp[fpl->count] = get_file(file);
7780 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7781 fpl->count++;
7782 spin_lock_irq(&head->lock);
7783 __skb_queue_head(head, skb);
7784 } else {
7785 skb = NULL;
7786 }
7787 }
7788 spin_unlock_irq(&head->lock);
7789
7790 if (skb) {
7791 fput(file);
7792 return 0;
7793 }
7794
7795 return __io_sqe_files_scm(ctx, 1, index);
7796#else
7797 return 0;
7798#endif
7799}
7800
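/*
 * Defer releasing a fixed resource (file or buffer): record it, together
 * with its user tag, on the given rsrc node so it is only put once every
 * request that might still use it has dropped its reference to the node.
 */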
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007801static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
Pavel Begunkove7c78372021-04-01 15:43:45 +01007802 struct io_rsrc_node *node, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007803{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007804 struct io_rsrc_put *prsrc;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007805
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007806 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7807 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007808 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007809
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007810 prsrc->tag = *io_get_tag_slot(data, idx);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007811 prsrc->rsrc = rsrc;
Pavel Begunkove7c78372021-04-01 15:43:45 +01007812 list_add(&prsrc->list, &node->rsrc_list);
Hillf Dantona5318d32020-03-23 17:47:15 +08007813 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007814}
7815
7816static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007817 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007818 unsigned nr_args)
7819{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007820 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007821 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007822 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007823 struct io_fixed_file *file_slot;
7824 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007825 int fd, i, err = 0;
7826 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007827 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007828
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007829 if (!ctx->file_data)
7830 return -ENXIO;
7831 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06007832 return -EINVAL;
7833
Pavel Begunkov67973b92021-01-26 13:51:09 +00007834 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007835 u64 tag = 0;
7836
7837 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
7838 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007839 err = -EFAULT;
7840 break;
7841 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007842 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
7843 err = -EINVAL;
7844 break;
7845 }
noah4e0377a2021-01-26 15:23:28 -05007846 if (fd == IORING_REGISTER_FILES_SKIP)
7847 continue;
7848
Pavel Begunkov67973b92021-01-26 13:51:09 +00007849 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007850 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007851
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007852 if (file_slot->file_ptr) {
7853 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007854 err = io_queue_rsrc_removal(data, up->offset + done,
7855 ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08007856 if (err)
7857 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007858 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007859 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007860 }
7861 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007862 file = fget(fd);
7863 if (!file) {
7864 err = -EBADF;
7865 break;
7866 }
7867 /*
7868 * Don't allow io_uring instances to be registered. If
7869 * UNIX isn't enabled, then this causes a reference
7870 * cycle and this instance can never get freed. If UNIX
7871 * is enabled we'll handle it just fine, but there's
7872 * still no point in allowing a ring fd as it doesn't
7873 * support regular read/write anyway.
7874 */
7875 if (file->f_op == &io_uring_fops) {
7876 fput(file);
7877 err = -EBADF;
7878 break;
7879 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007880 *io_get_tag_slot(data, up->offset + done) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01007881 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007882 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007883 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007884 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007885 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007886 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007887 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007888 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007889 }
7890
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007891 if (needs_switch)
7892 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06007893 return done ? done : err;
7894}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007895
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007896static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007897{
7898 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7899
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007900 req = io_put_req_find_next(req);
7901 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07007902}
7903
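/*
 * Create the io-wq async offload backend for this ring (reusing the ctx's
 * hash map if one already exists), owned by @task and sized to the smaller
 * of the SQ size and 4 * the number of online CPUs.
 */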
Jens Axboe685fe7f2021-03-08 09:37:51 -07007904static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7905 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007906{
Jens Axboee9418942021-02-19 12:33:30 -07007907 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007908 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007909 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007910
Jens Axboee9418942021-02-19 12:33:30 -07007911 hash = ctx->hash_map;
7912 if (!hash) {
7913 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7914 if (!hash)
7915 return ERR_PTR(-ENOMEM);
7916 refcount_set(&hash->refs, 1);
7917 init_waitqueue_head(&hash->wait);
7918 ctx->hash_map = hash;
7919 }
7920
7921 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07007922 data.task = task;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007923 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007924 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007925
Jens Axboed25e3a32021-02-16 11:41:41 -07007926	/* Do QD, or 4 * CPUS, whichever is smaller */
7927 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007928
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007929 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007930}
7931
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007932static int io_uring_alloc_task_context(struct task_struct *task,
7933 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007934{
7935 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007936 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007937
Pavel Begunkov09899b12021-06-14 02:36:22 +01007938 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06007939 if (unlikely(!tctx))
7940 return -ENOMEM;
7941
Jens Axboed8a6df12020-10-15 16:24:45 -06007942 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7943 if (unlikely(ret)) {
7944 kfree(tctx);
7945 return ret;
7946 }
7947
Jens Axboe685fe7f2021-03-08 09:37:51 -07007948 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007949 if (IS_ERR(tctx->io_wq)) {
7950 ret = PTR_ERR(tctx->io_wq);
7951 percpu_counter_destroy(&tctx->inflight);
7952 kfree(tctx);
7953 return ret;
7954 }
7955
Jens Axboe0f212202020-09-13 13:09:39 -06007956 xa_init(&tctx->xa);
7957 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06007958 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01007959 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007960 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00007961 spin_lock_init(&tctx->task_lock);
7962 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00007963 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06007964 return 0;
7965}
7966
7967void __io_uring_free(struct task_struct *tsk)
7968{
7969 struct io_uring_task *tctx = tsk->io_uring;
7970
7971 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007972 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01007973 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007974
Jens Axboed8a6df12020-10-15 16:24:45 -06007975 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007976 kfree(tctx);
7977 tsk->io_uring = NULL;
7978}
7979
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007980static int io_sq_offload_create(struct io_ring_ctx *ctx,
7981 struct io_uring_params *p)
Jens Axboe6b063142019-01-10 22:13:58 -07007982{
7983 int ret;
7984
Jens Axboed25e3a32021-02-16 11:41:41 -07007985 /* Retain compatibility with failing for an invalid attach attempt */
7986 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7987 IORING_SETUP_ATTACH_WQ) {
7988 struct fd f;
7989
7990 f = fdget(p->wq_fd);
7991 if (!f.file)
7992 return -ENXIO;
Jens Axboed25e3a32021-02-16 11:41:41 -07007993 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01007994 if (f.file->f_op != &io_uring_fops)
7995 return -EINVAL;
Jens Axboed25e3a32021-02-16 11:41:41 -07007996 }
Jens Axboe6b063142019-01-10 22:13:58 -07007997 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07007998 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007999 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008000 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008001
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008002 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008003 if (IS_ERR(sqd)) {
8004 ret = PTR_ERR(sqd);
8005 goto err;
8006 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008007
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01008008 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06008009 ctx->sq_data = sqd;
Jens Axboe6b063142019-01-10 22:13:58 -07008010 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8011 if (!ctx->sq_thread_idle)
8012 ctx->sq_thread_idle = HZ;
8013
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008014 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008015 list_add(&ctx->sqd_list, &sqd->ctx_list);
8016 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008017 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008018 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008019 io_sq_thread_unpark(sqd);
8020
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008021 if (ret < 0)
8022 goto err;
8023 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008024 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008025
Jens Axboe6b063142019-01-10 22:13:58 -07008026 if (p->flags & IORING_SETUP_SQ_AFF) {
8027 int cpu = p->sq_thread_cpu;
8028
8029 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008030 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008031 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008032 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008033 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008034 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008035 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008036
8037 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008038 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008039 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8040 if (IS_ERR(tsk)) {
8041 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008042 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008043 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008044
Jens Axboe46fe18b2021-03-04 12:39:36 -07008045 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008046 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008047 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008048 if (ret)
8049 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008050 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8051 /* Can't have SQ_AFF without SQPOLL */
8052 ret = -EINVAL;
8053 goto err;
8054 }
8055
Jens Axboe2b188cc2019-01-07 10:46:33 -07008056 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008057err_sqpoll:
8058 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008059err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008060 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008061 return ret;
8062}
8063
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008064static inline void __io_unaccount_mem(struct user_struct *user,
8065 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008066{
8067 atomic_long_sub(nr_pages, &user->locked_vm);
8068}
8069
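/*
 * Charge @nr_pages of pinned memory against the user's RLIMIT_MEMLOCK,
 * using a cmpxchg loop so that concurrent callers never overshoot the limit.
 */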
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008070static inline int __io_account_mem(struct user_struct *user,
8071 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008072{
8073 unsigned long page_limit, cur_pages, new_pages;
8074
8075 /* Don't allow more pages than we can safely lock */
8076 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8077
8078 do {
8079 cur_pages = atomic_long_read(&user->locked_vm);
8080 new_pages = cur_pages + nr_pages;
8081 if (new_pages > page_limit)
8082 return -ENOMEM;
8083 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8084 new_pages) != cur_pages);
8085
8086 return 0;
8087}
8088
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008089static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008090{
Jens Axboe62e398b2021-02-21 16:19:37 -07008091 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008092 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008093
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008094 if (ctx->mm_account)
8095 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008096}
8097
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008098static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008099{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008100 int ret;
8101
Jens Axboe62e398b2021-02-21 16:19:37 -07008102 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008103 ret = __io_account_mem(ctx->user, nr_pages);
8104 if (ret)
8105 return ret;
8106 }
8107
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008108 if (ctx->mm_account)
8109 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008110
8111 return 0;
8112}
8113
Jens Axboe2b188cc2019-01-07 10:46:33 -07008114static void io_mem_free(void *ptr)
8115{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008116 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008117
Mark Rutland52e04ef2019-04-30 17:30:21 +01008118 if (!ptr)
8119 return;
8120
8121 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008122 if (put_page_testzero(page))
8123 free_compound_page(page);
8124}
8125
8126static void *io_mem_alloc(size_t size)
8127{
8128 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008129 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008130
8131 return (void *) __get_free_pages(gfp_flags, get_order(size));
8132}
8133
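/*
 * Size of the shared rings area: struct io_rings including the CQE array,
 * padded to a cache line boundary, followed by the SQ index array. Returns
 * SIZE_MAX on arithmetic overflow; *sq_offset gets the SQ array's offset.
 */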
Hristo Venev75b28af2019-08-26 17:23:46 +00008134static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8135 size_t *sq_offset)
8136{
8137 struct io_rings *rings;
8138 size_t off, sq_array_size;
8139
8140 off = struct_size(rings, cqes, cq_entries);
8141 if (off == SIZE_MAX)
8142 return SIZE_MAX;
8143
8144#ifdef CONFIG_SMP
8145 off = ALIGN(off, SMP_CACHE_BYTES);
8146 if (off == 0)
8147 return SIZE_MAX;
8148#endif
8149
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008150 if (sq_offset)
8151 *sq_offset = off;
8152
Hristo Venev75b28af2019-08-26 17:23:46 +00008153 sq_array_size = array_size(sizeof(u32), sq_entries);
8154 if (sq_array_size == SIZE_MAX)
8155 return SIZE_MAX;
8156
8157 if (check_add_overflow(off, sq_array_size, &off))
8158 return SIZE_MAX;
8159
Hristo Venev75b28af2019-08-26 17:23:46 +00008160 return off;
8161}
8162
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008163static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008164{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008165 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008166 unsigned int i;
8167
Pavel Begunkov62248432021-04-28 13:11:29 +01008168 if (imu != ctx->dummy_ubuf) {
8169 for (i = 0; i < imu->nr_bvecs; i++)
8170 unpin_user_page(imu->bvec[i].bv_page);
8171 if (imu->acct_pages)
8172 io_unaccount_mem(ctx, imu->acct_pages);
8173 kvfree(imu);
8174 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008175 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008176}
8177
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008178static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8179{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008180 io_buffer_unmap(ctx, &prsrc->buf);
8181 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008182}
8183
8184static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008185{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008186 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008187
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008188 for (i = 0; i < ctx->nr_user_bufs; i++)
8189 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008190 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08008191 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07008192 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008193 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008194 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008195}
8196
Jens Axboeedafcce2019-01-09 09:16:05 -07008197static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8198{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008199 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008200
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008201 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07008202 return -ENXIO;
8203
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008204 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8205 if (!ret)
8206 __io_sqe_buffers_unregister(ctx);
8207 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008208}
8209
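/*
 * Copy the index'th iovec of the userspace array at @arg, converting from the
 * 32-bit compat layout if the ring was set up by a compat task.
 */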
8210static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8211 void __user *arg, unsigned index)
8212{
8213 struct iovec __user *src;
8214
8215#ifdef CONFIG_COMPAT
8216 if (ctx->compat) {
8217 struct compat_iovec __user *ciovs;
8218 struct compat_iovec ciov;
8219
8220 ciovs = (struct compat_iovec __user *) arg;
8221 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8222 return -EFAULT;
8223
Jens Axboed55e5f52019-12-11 16:12:15 -07008224 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008225 dst->iov_len = ciov.iov_len;
8226 return 0;
8227 }
8228#endif
8229 src = (struct iovec __user *) arg;
8230 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8231 return -EFAULT;
8232 return 0;
8233}
8234
Jens Axboede293932020-09-17 16:19:16 -06008235/*
8236 * Not super efficient, but this only runs at registration time. And we do cache
8237 * the last compound head, so generally we'll only do a full search if we don't
8238 * match that one.
8239 *
8240 * We check if the given compound head page has already been accounted, to
8241 * avoid double accounting it. This allows us to account the full size of the
8242 * page, not just the constituent pages of a huge page.
8243 */
8244static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8245 int nr_pages, struct page *hpage)
8246{
8247 int i, j;
8248
8249 /* check current page array */
8250 for (i = 0; i < nr_pages; i++) {
8251 if (!PageCompound(pages[i]))
8252 continue;
8253 if (compound_head(pages[i]) == hpage)
8254 return true;
8255 }
8256
8257 /* check previously registered pages */
8258 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008259 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06008260
8261 for (j = 0; j < imu->nr_bvecs; j++) {
8262 if (!PageCompound(imu->bvec[j].bv_page))
8263 continue;
8264 if (compound_head(imu->bvec[j].bv_page) == hpage)
8265 return true;
8266 }
8267 }
8268
8269 return false;
8270}
8271
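/*
 * Figure out how many pages to charge for a freshly pinned buffer and account
 * them; a compound (huge) page is charged once at its full size no matter how
 * many of its constituent pages the buffer spans.
 */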
8272static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8273 int nr_pages, struct io_mapped_ubuf *imu,
8274 struct page **last_hpage)
8275{
8276 int i, ret;
8277
Pavel Begunkov216e5832021-05-29 12:01:02 +01008278 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06008279 for (i = 0; i < nr_pages; i++) {
8280 if (!PageCompound(pages[i])) {
8281 imu->acct_pages++;
8282 } else {
8283 struct page *hpage;
8284
8285 hpage = compound_head(pages[i]);
8286 if (hpage == *last_hpage)
8287 continue;
8288 *last_hpage = hpage;
8289 if (headpage_already_acct(ctx, pages, i, hpage))
8290 continue;
8291 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8292 }
8293 }
8294
8295 if (!imu->acct_pages)
8296 return 0;
8297
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008298 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008299 if (ret)
8300 imu->acct_pages = 0;
8301 return ret;
8302}
8303
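/*
 * Register one fixed buffer: pin the pages behind @iov long term, reject
 * file-backed memory other than shmem and hugetlbfs, account the pinned
 * pages, and build the bvec table in a new io_mapped_ubuf. A NULL iov_base
 * installs the shared dummy entry for a sparse slot.
 */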
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008304static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008305 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008306 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008307{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008308 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008309 struct vm_area_struct **vmas = NULL;
8310 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008311 unsigned long off, start, end, ubuf;
8312 size_t size;
8313 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008314
Pavel Begunkov62248432021-04-28 13:11:29 +01008315 if (!iov->iov_base) {
8316 *pimu = ctx->dummy_ubuf;
8317 return 0;
8318 }
8319
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008320 ubuf = (unsigned long) iov->iov_base;
8321 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8322 start = ubuf >> PAGE_SHIFT;
8323 nr_pages = end - start;
8324
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008325 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008326 ret = -ENOMEM;
8327
8328 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8329 if (!pages)
8330 goto done;
8331
8332 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8333 GFP_KERNEL);
8334 if (!vmas)
8335 goto done;
8336
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008337 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01008338 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008339 goto done;
8340
8341 ret = 0;
8342 mmap_read_lock(current->mm);
8343 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8344 pages, vmas);
8345 if (pret == nr_pages) {
8346		/* don't support file-backed memory */
8347 for (i = 0; i < nr_pages; i++) {
8348 struct vm_area_struct *vma = vmas[i];
8349
Pavel Begunkov40dad762021-06-09 15:26:54 +01008350 if (vma_is_shmem(vma))
8351 continue;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008352 if (vma->vm_file &&
8353 !is_file_hugepages(vma->vm_file)) {
8354 ret = -EOPNOTSUPP;
8355 break;
8356 }
8357 }
8358 } else {
8359 ret = pret < 0 ? pret : -EFAULT;
8360 }
8361 mmap_read_unlock(current->mm);
8362 if (ret) {
8363 /*
8364		 * if we did a partial map, or found file-backed vmas,
8365 * release any pages we did get
8366 */
8367 if (pret > 0)
8368 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008369 goto done;
8370 }
8371
8372 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8373 if (ret) {
8374 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008375 goto done;
8376 }
8377
8378 off = ubuf & ~PAGE_MASK;
8379 size = iov->iov_len;
8380 for (i = 0; i < nr_pages; i++) {
8381 size_t vec_len;
8382
8383 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8384 imu->bvec[i].bv_page = pages[i];
8385 imu->bvec[i].bv_len = vec_len;
8386 imu->bvec[i].bv_offset = off;
8387 off = 0;
8388 size -= vec_len;
8389 }
8390 /* store original address for later verification */
8391 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01008392 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008393 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008394 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008395 ret = 0;
8396done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008397 if (ret)
8398 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008399 kvfree(pages);
8400 kvfree(vmas);
8401 return ret;
8402}
8403
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008404static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008405{
Pavel Begunkov87094462021-04-11 01:46:36 +01008406 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8407 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008408}
8409
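/*
 * Basic sanity checks on one buffer iovec: a NULL base is only valid for an
 * empty (sparse) entry, the length is capped at 1GB, and base + length must
 * not overflow. Finer-grained checks happen when IO is actually submitted.
 */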
8410static int io_buffer_validate(struct iovec *iov)
8411{
Pavel Begunkov50e96982021-03-24 22:59:01 +00008412 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8413
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008414 /*
8415 * Don't impose further limits on the size and buffer
8416	 * constraints here; we'll -EINVAL later when IO is
8417 * submitted if they are wrong.
8418 */
Pavel Begunkov62248432021-04-28 13:11:29 +01008419 if (!iov->iov_base)
8420 return iov->iov_len ? -EFAULT : 0;
8421 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008422 return -EFAULT;
8423
8424 /* arbitrary limit, but we need something */
8425 if (iov->iov_len > SZ_1G)
8426 return -EFAULT;
8427
Pavel Begunkov50e96982021-03-24 22:59:01 +00008428 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8429 return -EOVERFLOW;
8430
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008431 return 0;
8432}
8433
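/*
 * Register the fixed-buffer table: each iovec is copied from userspace,
 * validated and pinned; @tags, when given, supplies a per-buffer tag for the
 * rsrc data. Any failure tears the whole registration down again.
 */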
8434static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008435 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008436{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008437 struct page *last_hpage = NULL;
8438 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008439 int i, ret;
8440 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008441
Pavel Begunkov87094462021-04-11 01:46:36 +01008442 if (ctx->user_bufs)
8443 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01008444 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01008445 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008446 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008447 if (ret)
8448 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008449 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
8450 if (ret)
8451 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008452 ret = io_buffers_map_alloc(ctx, nr_args);
8453 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08008454 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008455 return ret;
8456 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008457
Pavel Begunkov87094462021-04-11 01:46:36 +01008458 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07008459 ret = io_copy_iov(ctx, &iov, arg, i);
8460 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008461 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008462 ret = io_buffer_validate(&iov);
8463 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008464 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008465 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008466 ret = -EINVAL;
8467 break;
8468 }
Jens Axboeedafcce2019-01-09 09:16:05 -07008469
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008470 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
8471 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008472 if (ret)
8473 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008474 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008475
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008476 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008477
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008478 ctx->buf_data = data;
8479 if (ret)
8480 __io_sqe_buffers_unregister(ctx);
8481 else
8482 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07008483 return ret;
8484}
8485
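/*
 * Replace already-registered buffers in place. Each displaced buffer is
 * queued on the current rsrc node so its pages are only unpinned once earlier
 * users are done with it. Returns how many entries were updated, or an error
 * if none were.
 */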
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008486static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
8487 struct io_uring_rsrc_update2 *up,
8488 unsigned int nr_args)
8489{
8490 u64 __user *tags = u64_to_user_ptr(up->tags);
8491 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008492 struct page *last_hpage = NULL;
8493 bool needs_switch = false;
8494 __u32 done;
8495 int i, err;
8496
8497 if (!ctx->buf_data)
8498 return -ENXIO;
8499 if (up->offset + nr_args > ctx->nr_user_bufs)
8500 return -EINVAL;
8501
8502 for (done = 0; done < nr_args; done++) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008503 struct io_mapped_ubuf *imu;
8504 int offset = up->offset + done;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008505 u64 tag = 0;
8506
8507 err = io_copy_iov(ctx, &iov, iovs, done);
8508 if (err)
8509 break;
8510 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
8511 err = -EFAULT;
8512 break;
8513 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008514 err = io_buffer_validate(&iov);
8515 if (err)
8516 break;
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008517 if (!iov.iov_base && tag) {
8518 err = -EINVAL;
8519 break;
8520 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008521 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
8522 if (err)
8523 break;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008524
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008525 i = array_index_nospec(offset, ctx->nr_user_bufs);
Pavel Begunkov62248432021-04-28 13:11:29 +01008526 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008527 err = io_queue_rsrc_removal(ctx->buf_data, offset,
8528 ctx->rsrc_node, ctx->user_bufs[i]);
8529 if (unlikely(err)) {
8530 io_buffer_unmap(ctx, &imu);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008531 break;
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008532 }
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008533 ctx->user_bufs[i] = NULL;
8534 needs_switch = true;
8535 }
8536
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008537 ctx->user_bufs[i] = imu;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008538 *io_get_tag_slot(ctx->buf_data, offset) = tag;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008539 }
8540
8541 if (needs_switch)
8542 io_rsrc_node_switch(ctx, ctx->buf_data);
8543 return done ? done : err;
8544}
8545
Jens Axboe9b402842019-04-11 11:45:41 -06008546static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8547{
8548 __s32 __user *fds = arg;
8549 int fd;
8550
8551 if (ctx->cq_ev_fd)
8552 return -EBUSY;
8553
8554 if (copy_from_user(&fd, fds, sizeof(*fds)))
8555 return -EFAULT;
8556
8557 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8558 if (IS_ERR(ctx->cq_ev_fd)) {
8559 int ret = PTR_ERR(ctx->cq_ev_fd);
8560 ctx->cq_ev_fd = NULL;
8561 return ret;
8562 }
8563
8564 return 0;
8565}
8566
8567static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8568{
8569 if (ctx->cq_ev_fd) {
8570 eventfd_ctx_put(ctx->cq_ev_fd);
8571 ctx->cq_ev_fd = NULL;
8572 return 0;
8573 }
8574
8575 return -ENXIO;
8576}
8577
Jens Axboe5a2e7452020-02-23 16:23:11 -07008578static void io_destroy_buffers(struct io_ring_ctx *ctx)
8579{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008580 struct io_buffer *buf;
8581 unsigned long index;
8582
8583 xa_for_each(&ctx->io_buffers, index, buf)
8584 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008585}
8586
Jens Axboe68e68ee2021-02-13 09:00:02 -07008587static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008588{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008589 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008590
Jens Axboe68e68ee2021-02-13 09:00:02 -07008591 list_for_each_entry_safe(req, nxt, list, compl.list) {
8592 if (tsk && req->task != tsk)
8593 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008594 list_del(&req->compl.list);
8595 kmem_cache_free(req_cachep, req);
8596 }
8597}
8598
Jens Axboe4010fec2021-02-27 15:04:18 -07008599static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008600{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008601 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008602 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008603
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008604 mutex_lock(&ctx->uring_lock);
8605
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008606 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008607 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8608 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008609 submit_state->free_reqs = 0;
8610 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008611
Pavel Begunkovdac7a092021-03-19 17:22:39 +00008612 io_flush_cached_locked_reqs(ctx, cs);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008613 io_req_cache_free(&cs->free_list, NULL);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008614 mutex_unlock(&ctx->uring_lock);
8615}
8616
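/*
 * Drop our reference on an rsrc table and, if that wasn't the last one, wait
 * for the remaining holders to complete ->done. Returns false if there is no
 * table to wait for.
 */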
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008617static bool io_wait_rsrc_data(struct io_rsrc_data *data)
8618{
8619 if (!data)
8620 return false;
8621 if (!atomic_dec_and_test(&data->refs))
8622 wait_for_completion(&data->done);
8623 return true;
8624}
8625
Jens Axboe2b188cc2019-01-07 10:46:33 -07008626static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8627{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008628 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008629
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008630 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008631 mmdrop(ctx->mm_account);
8632 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008633 }
Jens Axboedef596e2019-01-09 08:59:42 -07008634
Hao Xu8bad28d2021-02-19 17:19:36 +08008635 mutex_lock(&ctx->uring_lock);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008636 if (io_wait_rsrc_data(ctx->buf_data))
8637 __io_sqe_buffers_unregister(ctx);
8638 if (io_wait_rsrc_data(ctx->file_data))
Pavel Begunkov08480402021-04-13 02:58:38 +01008639 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01008640 if (ctx->rings)
8641 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08008642 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008643 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008644 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01008645 if (ctx->sq_creds)
8646 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07008647
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008648 /* there are no registered resources left, nobody uses it */
8649 if (ctx->rsrc_node)
8650 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008651 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008652 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008653 flush_delayed_work(&ctx->rsrc_put_work);
8654
8655 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8656 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008657
8658#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008659 if (ctx->ring_sock) {
8660 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008661 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008662 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008663#endif
8664
Hristo Venev75b28af2019-08-26 17:23:46 +00008665 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008666 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008667
8668 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008669 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008670 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008671 if (ctx->hash_map)
8672 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008673 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01008674 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008675 kfree(ctx);
8676}
8677
8678static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8679{
8680 struct io_ring_ctx *ctx = file->private_data;
8681 __poll_t mask = 0;
8682
8683 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008684 /*
8685 * synchronizes with barrier from wq_has_sleeper call in
8686 * io_commit_cqring
8687 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008688 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008689 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008690 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008691
8692 /*
8693 * Don't flush cqring overflow list here, just do a simple check.
8694	 * Otherwise there could possibly be an ABBA deadlock:
8695 * CPU0 CPU1
8696 * ---- ----
8697 * lock(&ctx->uring_lock);
8698 * lock(&ep->mtx);
8699 * lock(&ctx->uring_lock);
8700 * lock(&ep->mtx);
8701 *
8702	 * Users may get EPOLLIN while seeing nothing in the cqring; this
8703	 * pushes them to do the flush.
8704 */
8705 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008706 mask |= EPOLLIN | EPOLLRDNORM;
8707
8708 return mask;
8709}
8710
8711static int io_uring_fasync(int fd, struct file *file, int on)
8712{
8713 struct io_ring_ctx *ctx = file->private_data;
8714
8715 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8716}
8717
Yejune Deng0bead8c2020-12-24 11:02:20 +08008718static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008719{
Jens Axboe4379bf82021-02-15 13:40:22 -07008720 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008721
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008722 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008723 if (creds) {
8724 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008725 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008726 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008727
8728 return -EINVAL;
8729}
8730
Pavel Begunkov9b465712021-03-15 14:23:07 +00008731static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008732{
Pavel Begunkov9b465712021-03-15 14:23:07 +00008733 return io_run_task_work_head(&ctx->exit_task_work);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008734}
8735
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008736struct io_tctx_exit {
8737 struct callback_head task_work;
8738 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008739 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008740};
8741
8742static void io_tctx_exit_cb(struct callback_head *cb)
8743{
8744 struct io_uring_task *tctx = current->io_uring;
8745 struct io_tctx_exit *work;
8746
8747 work = container_of(cb, struct io_tctx_exit, task_work);
8748 /*
8749 * When @in_idle, we're in cancellation and it's racy to remove the
8750 * node. It'll be removed by the end of cancellation, just ignore it.
8751 */
8752 if (!atomic_read(&tctx->in_idle))
Pavel Begunkoveef51da2021-06-14 02:36:15 +01008753 io_uring_del_tctx_node((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008754 complete(&work->completion);
8755}
8756
Pavel Begunkov28090c12021-04-25 23:34:45 +01008757static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8758{
8759 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8760
8761 return req->ctx == data;
8762}
8763
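/*
 * Deferred ring teardown: keep cancelling outstanding work (including SQPOLL
 * requests) until all ctx references are gone, then detach every task node
 * still pointing at this ctx before freeing it.
 */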
Jens Axboe85faa7b2020-04-09 18:14:00 -06008764static void io_ring_exit_work(struct work_struct *work)
8765{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008766 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008767 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008768 struct io_tctx_exit exit;
8769 struct io_tctx_node *node;
8770 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008771
Jens Axboe56952e92020-06-17 15:00:04 -06008772 /*
8773 * If we're doing polled IO and end up having requests being
8774 * submitted async (out-of-line), then completions can come in while
8775 * we're waiting for refs to drop. We need to reap these manually,
8776 * as nobody else will be looking for them.
8777 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008778 do {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008779 io_uring_try_cancel_requests(ctx, NULL, true);
Pavel Begunkov28090c12021-04-25 23:34:45 +01008780 if (ctx->sq_data) {
8781 struct io_sq_data *sqd = ctx->sq_data;
8782 struct task_struct *tsk;
8783
8784 io_sq_thread_park(sqd);
8785 tsk = sqd->thread;
8786 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
8787 io_wq_cancel_cb(tsk->io_uring->io_wq,
8788 io_cancel_ctx_cb, ctx, true);
8789 io_sq_thread_unpark(sqd);
8790 }
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008791
8792 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008793 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008794
Pavel Begunkov7f006512021-04-14 13:38:34 +01008795 init_completion(&exit.completion);
8796 init_task_work(&exit.task_work, io_tctx_exit_cb);
8797 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01008798 /*
8799 * Some may use context even when all refs and requests have been put,
8800 * and they are free to do so while still holding uring_lock or
8801 * completion_lock, see __io_req_task_submit(). Apart from other work,
8802	 * this lock/unlock section also waits for them to finish.
8803 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008804 mutex_lock(&ctx->uring_lock);
8805 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008806 WARN_ON_ONCE(time_after(jiffies, timeout));
8807
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008808 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8809 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01008810 /* don't spin on a single task if cancellation failed */
8811 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008812 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8813 if (WARN_ON_ONCE(ret))
8814 continue;
8815 wake_up_process(node->task);
8816
8817 mutex_unlock(&ctx->uring_lock);
8818 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008819 mutex_lock(&ctx->uring_lock);
8820 }
8821 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov89b50662021-04-01 15:43:50 +01008822 spin_lock_irq(&ctx->completion_lock);
8823 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008824
Jens Axboe85faa7b2020-04-09 18:14:00 -06008825 io_ring_ctx_free(ctx);
8826}
8827
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008828/* Returns true if we found and killed one or more timeouts */
8829static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008830 bool cancel_all)
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008831{
8832 struct io_kiocb *req, *tmp;
8833 int canceled = 0;
8834
8835 spin_lock_irq(&ctx->completion_lock);
8836 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008837 if (io_match_task(req, tsk, cancel_all)) {
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008838 io_kill_timeout(req, -ECANCELED);
8839 canceled++;
8840 }
8841 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008842 if (canceled != 0)
8843 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008844 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008845 if (canceled != 0)
8846 io_cqring_ev_posted(ctx);
8847 return canceled != 0;
8848}
8849
Jens Axboe2b188cc2019-01-07 10:46:33 -07008850static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8851{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008852 unsigned long index;
8853 struct creds *creds;
8854
Jens Axboe2b188cc2019-01-07 10:46:33 -07008855 mutex_lock(&ctx->uring_lock);
8856 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00008857 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00008858 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008859 xa_for_each(&ctx->personalities, index, creds)
8860 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008861 mutex_unlock(&ctx->uring_lock);
8862
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008863 io_kill_timeouts(ctx, NULL, true);
8864 io_poll_remove_all(ctx, NULL, true);
Jens Axboe561fb042019-10-24 07:25:42 -06008865
Jens Axboe15dff282019-11-13 09:09:23 -07008866 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008867 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008868
Jens Axboe85faa7b2020-04-09 18:14:00 -06008869 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008870 /*
8871 * Use system_unbound_wq to avoid spawning tons of event kworkers
8872 * if we're exiting a ton of rings at the same time. It just adds
8873	 * noise and overhead; there's no discernible change in runtime
8874 * over using system_wq.
8875 */
8876 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008877}
8878
8879static int io_uring_release(struct inode *inode, struct file *file)
8880{
8881 struct io_ring_ctx *ctx = file->private_data;
8882
8883 file->private_data = NULL;
8884 io_ring_ctx_wait_and_kill(ctx);
8885 return 0;
8886}
8887
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008888struct io_task_cancel {
8889 struct task_struct *task;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008890 bool all;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008891};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008892
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008893static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008894{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008895 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008896 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008897 bool ret;
8898
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008899 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008900 unsigned long flags;
8901 struct io_ring_ctx *ctx = req->ctx;
8902
8903 /* protect against races with linked timeouts */
8904 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008905 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008906 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8907 } else {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008908 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008909 }
8910 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008911}
8912
Pavel Begunkove1915f72021-03-11 23:29:35 +00008913static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008914 struct task_struct *task, bool cancel_all)
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008915{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008916 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008917 LIST_HEAD(list);
8918
8919 spin_lock_irq(&ctx->completion_lock);
8920 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008921 if (io_match_task(de->req, task, cancel_all)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008922 list_cut_position(&list, &ctx->defer_list, &de->list);
8923 break;
8924 }
8925 }
8926 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008927 if (list_empty(&list))
8928 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008929
8930 while (!list_empty(&list)) {
8931 de = list_first_entry(&list, struct io_defer_entry, list);
8932 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008933 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008934 kfree(de);
8935 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008936 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008937}
8938
Pavel Begunkov1b007642021-03-06 11:02:17 +00008939static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8940{
8941 struct io_tctx_node *node;
8942 enum io_wq_cancel cret;
8943 bool ret = false;
8944
8945 mutex_lock(&ctx->uring_lock);
8946 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8947 struct io_uring_task *tctx = node->task->io_uring;
8948
8949 /*
8950 * io_wq will stay alive while we hold uring_lock, because it's
8951		 * killed after ctx nodes, which requires taking the lock.
8952 */
8953 if (!tctx || !tctx->io_wq)
8954 continue;
8955 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8956 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8957 }
8958 mutex_unlock(&ctx->uring_lock);
8959
8960 return ret;
8961}
8962
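/*
 * Cancel requests on this ring on behalf of @task, or for every task if
 * @task is NULL: io-wq work, deferred requests, poll requests and timeouts.
 * Loops until a full pass finds nothing left to cancel.
 */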
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008963static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8964 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008965 bool cancel_all)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008966{
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008967 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008968 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008969
8970 while (1) {
8971 enum io_wq_cancel cret;
8972 bool ret = false;
8973
Pavel Begunkov1b007642021-03-06 11:02:17 +00008974 if (!task) {
8975 ret |= io_uring_try_cancel_iowq(ctx);
8976 } else if (tctx && tctx->io_wq) {
8977 /*
8978 * Cancels requests of all rings, not only @ctx, but
8979 * it's fine as the task is in exit/exec.
8980 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008981 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008982 &cancel, true);
8983 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8984 }
8985
8986 /* SQPOLL thread does its own polling */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008987 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
Jens Axboed052d1d2021-03-11 10:49:20 -07008988 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008989 while (!list_empty_careful(&ctx->iopoll_list)) {
8990 io_iopoll_try_reap_events(ctx);
8991 ret = true;
8992 }
8993 }
8994
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008995 ret |= io_cancel_defer_files(ctx, task, cancel_all);
8996 ret |= io_poll_remove_all(ctx, task, cancel_all);
8997 ret |= io_kill_timeouts(ctx, task, cancel_all);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008998 ret |= io_run_task_work();
Pavel Begunkovba50a032021-02-26 15:47:56 +00008999 ret |= io_run_ctx_fallback(ctx);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009000 if (!ret)
9001 break;
9002 cond_resched();
9003 }
9004}
9005
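/*
 * Slow path of io_uring_add_tctx_node(): allocate the per-task io_uring
 * context if needed and link a node into both the task's xarray and the
 * ctx's tctx_list, so either side can find the other for cancellation.
 */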
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009006static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009007{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009008 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009009 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00009010 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009011
9012 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009013 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009014 if (unlikely(ret))
9015 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009016 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06009017 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009018 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9019 node = kmalloc(sizeof(*node), GFP_KERNEL);
9020 if (!node)
9021 return -ENOMEM;
9022 node->ctx = ctx;
9023 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06009024
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009025 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9026 node, GFP_KERNEL));
9027 if (ret) {
9028 kfree(node);
9029 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06009030 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009031
9032 mutex_lock(&ctx->uring_lock);
9033 list_add(&node->ctx_node, &ctx->tctx_list);
9034 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009035 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009036 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06009037 return 0;
9038}
9039
9040/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009041 * Note that this task has used io_uring. We use it for cancelation purposes.
9042 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009043static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009044{
9045 struct io_uring_task *tctx = current->io_uring;
9046
9047 if (likely(tctx && tctx->last == ctx))
9048 return 0;
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009049 return __io_uring_add_tctx_node(ctx);
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009050}
9051
9052/*
Jens Axboe0f212202020-09-13 13:09:39 -06009053 * Remove this io_uring_file -> task mapping.
9054 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009055static void io_uring_del_tctx_node(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06009056{
9057 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009058 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00009059
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00009060 if (!tctx)
9061 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009062 node = xa_erase(&tctx->xa, index);
9063 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00009064 return;
Jens Axboe0f212202020-09-13 13:09:39 -06009065
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009066 WARN_ON_ONCE(current != node->task);
9067 WARN_ON_ONCE(list_empty(&node->ctx_node));
9068
9069 mutex_lock(&node->ctx->uring_lock);
9070 list_del(&node->ctx_node);
9071 mutex_unlock(&node->ctx->uring_lock);
9072
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009073 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009074 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009075 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06009076}
9077
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009078static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009079{
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009080 struct io_wq *wq = tctx->io_wq;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009081 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009082 unsigned long index;
9083
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009084 xa_for_each(&tctx->xa, index, node)
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009085 io_uring_del_tctx_node(index);
Marco Elverb16ef422021-05-27 11:25:48 +02009086 if (wq) {
9087 /*
9088		 * Must be after io_uring_del_tctx_node() (removes nodes under
9089 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9090 */
9091 tctx->io_wq = NULL;
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009092 io_wq_put_and_exit(wq);
Marco Elverb16ef422021-05-27 11:25:48 +02009093 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009094}
9095
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009096static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009097{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009098 if (tracked)
9099 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009100 return percpu_counter_sum(&tctx->inflight);
9101}
9102
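/*
 * Return the task's cached request refs: take them out of the inflight
 * counter and drop the matching task_struct references.
 */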
Pavel Begunkov09899b12021-06-14 02:36:22 +01009103static void io_uring_drop_tctx_refs(struct task_struct *task)
9104{
9105 struct io_uring_task *tctx = task->io_uring;
9106 unsigned int refs = tctx->cached_refs;
9107
9108 tctx->cached_refs = 0;
9109 percpu_counter_sub(&tctx->inflight, refs);
9110 put_task_struct_many(task, refs);
9111}
9112
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009113/*
9114 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9115 * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
9116 */
9117static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009118{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009119 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov734551d2021-04-18 14:52:09 +01009120 struct io_ring_ctx *ctx;
Jens Axboefdaf0832020-10-30 09:37:30 -06009121 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009122 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009123
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009124 WARN_ON_ONCE(sqd && sqd->thread != current);
9125
Palash Oswal6d042ff2021-04-27 18:21:49 +05309126 if (!current->io_uring)
9127 return;
Pavel Begunkov17a91052021-05-23 15:48:39 +01009128 if (tctx->io_wq)
9129 io_wq_exit_start(tctx->io_wq);
9130
Pavel Begunkov09899b12021-06-14 02:36:22 +01009131 io_uring_drop_tctx_refs(current);
Jens Axboefdaf0832020-10-30 09:37:30 -06009132 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -06009133 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009134 /* read completions before cancelations */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009135 inflight = tctx_inflight(tctx, !cancel_all);
Jens Axboed8a6df12020-10-15 16:24:45 -06009136 if (!inflight)
9137 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009138
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009139 if (!sqd) {
9140 struct io_tctx_node *node;
9141 unsigned long index;
9142
9143 xa_for_each(&tctx->xa, index, node) {
9144 /* sqpoll task will cancel all its requests */
9145 if (node->ctx->sq_data)
9146 continue;
9147 io_uring_try_cancel_requests(node->ctx, current,
9148 cancel_all);
9149 }
9150 } else {
9151 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9152 io_uring_try_cancel_requests(ctx, current,
9153 cancel_all);
9154 }
9155
9156 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
Jens Axboe0f212202020-09-13 13:09:39 -06009157 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009158 * If we've seen completions, retry without waiting. This
9159 * avoids a race where a completion comes in before we did
9160 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009161 */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009162 if (inflight == tctx_inflight(tctx, !cancel_all))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009163 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009164 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009165 } while (1);
Jens Axboefdaf0832020-10-30 09:37:30 -06009166 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009167
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009168 io_uring_clean_tctx(tctx);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009169 if (cancel_all) {
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009170 /* for exec all current's requests should be gone, kill tctx */
9171 __io_uring_free(current);
9172 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009173}
9174
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009175void __io_uring_cancel(struct files_struct *files)
9176{
9177 io_uring_cancel_generic(!files, NULL);
9178}
9179
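/*
 * Map an mmap() offset (IORING_OFF_SQ_RING/CQ_RING/SQES) to the kernel
 * address backing that region, rejecting mappings larger than the underlying
 * allocation.
 */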
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009180static void *io_uring_validate_mmap_request(struct file *file,
9181 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009182{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009183 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009184 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009185 struct page *page;
9186 void *ptr;
9187
9188 switch (offset) {
9189 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009190 case IORING_OFF_CQ_RING:
9191 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009192 break;
9193 case IORING_OFF_SQES:
9194 ptr = ctx->sq_sqes;
9195 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009196 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009197 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009198 }
9199
9200 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009201 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009202 return ERR_PTR(-EINVAL);
9203
9204 return ptr;
9205}
9206
9207#ifdef CONFIG_MMU
9208
9209static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9210{
9211 size_t sz = vma->vm_end - vma->vm_start;
9212 unsigned long pfn;
9213 void *ptr;
9214
9215 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9216 if (IS_ERR(ptr))
9217 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009218
9219 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9220 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9221}
9222
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009223#else /* !CONFIG_MMU */
9224
9225static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9226{
9227 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9228}
9229
9230static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9231{
9232 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9233}
9234
9235static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9236 unsigned long addr, unsigned long len,
9237 unsigned long pgoff, unsigned long flags)
9238{
9239 void *ptr;
9240
9241 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9242 if (IS_ERR(ptr))
9243 return PTR_ERR(ptr);
9244
9245 return (unsigned long) ptr;
9246}
9247
9248#endif /* !CONFIG_MMU */
9249
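/*
 * IORING_ENTER_SQ_WAIT: sleep until the SQ ring has room for new entries, or
 * a signal is pending.
 */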
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009250static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009251{
9252 DEFINE_WAIT(wait);
9253
9254 do {
9255 if (!io_sqring_full(ctx))
9256 break;
Jens Axboe90554202020-09-03 12:12:41 -06009257 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9258
9259 if (!io_sqring_full(ctx))
9260 break;
Jens Axboe90554202020-09-03 12:12:41 -06009261 schedule();
9262 } while (!signal_pending(current));
9263
9264 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009265 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009266}
9267
Hao Xuc73ebb62020-11-03 10:54:37 +08009268static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9269 struct __kernel_timespec __user **ts,
9270 const sigset_t __user **sig)
9271{
9272 struct io_uring_getevents_arg arg;
9273
9274 /*
9275 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9276 * is just a pointer to the sigset_t.
9277 */
9278 if (!(flags & IORING_ENTER_EXT_ARG)) {
9279 *sig = (const sigset_t __user *) argp;
9280 *ts = NULL;
9281 return 0;
9282 }
9283
9284 /*
9285 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9286 * timespec and sigset_t pointers if good.
9287 */
9288 if (*argsz != sizeof(arg))
9289 return -EINVAL;
9290 if (copy_from_user(&arg, argp, sizeof(arg)))
9291 return -EFAULT;
9292 *sig = u64_to_user_ptr(arg.sigmask);
9293 *argsz = arg.sigmask_sz;
9294 *ts = u64_to_user_ptr(arg.ts);
9295 return 0;
9296}
9297
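/*
 * io_uring_enter() - submit pending SQEs and/or wait for completions on the
 * ring referred to by @fd. With IORING_SETUP_SQPOLL the kernel thread does
 * the actual submission, so this call mainly wakes it up and/or waits for SQ
 * space and CQ events.
 *
 * Purely as an illustration (not part of this file), a raw userspace call
 * that submits whatever is queued in the SQ ring and waits for at least one
 * completion could look like:
 *
 *	syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *		IORING_ENTER_GETEVENTS, NULL, 0);
 */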
Jens Axboe2b188cc2019-01-07 10:46:33 -07009298SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009299 u32, min_complete, u32, flags, const void __user *, argp,
9300 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009301{
9302 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009303 int submitted = 0;
9304 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009305 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009306
Jens Axboe4c6e2772020-07-01 11:29:10 -06009307 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009308
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009309 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9310 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009311 return -EINVAL;
9312
9313 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009314 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009315 return -EBADF;
9316
9317 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009318 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009319 goto out_fput;
9320
9321 ret = -ENXIO;
9322 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009323 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009324 goto out_fput;
9325
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009326 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009327 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009328 goto out;
9329
Jens Axboe6c271ce2019-01-10 11:22:30 -07009330 /*
9331 * For SQ polling, the thread will do all submissions and completions.
9332 * Just return the requested submit count, and wake the thread if
9333 * we were asked to.
9334 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009335 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009336 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009337 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009338
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009339 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009340 if (unlikely(ctx->sq_data->thread == NULL)) {
9341 goto out;
9342 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009343 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009344 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009345 if (flags & IORING_ENTER_SQ_WAIT) {
9346 ret = io_sqpoll_wait_sq(ctx);
9347 if (ret)
9348 goto out;
9349 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009350 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009351 } else if (to_submit) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009352 ret = io_uring_add_tctx_node(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009353 if (unlikely(ret))
9354 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009355 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009356 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009357 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009358
9359 if (submitted != to_submit)
9360 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009361 }
9362 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009363 const sigset_t __user *sig;
9364 struct __kernel_timespec __user *ts;
9365
9366 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9367 if (unlikely(ret))
9368 goto out;
9369
Jens Axboe2b188cc2019-01-07 10:46:33 -07009370 min_complete = min(min_complete, ctx->cq_entries);
9371
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009372 /*
9373 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9374 * space applications don't need to do io completion events
9375 * polling again, they can rely on io_sq_thread to do polling
9376 * work, which can reduce cpu usage and uring_lock contention.
9377 */
9378 if (ctx->flags & IORING_SETUP_IOPOLL &&
9379 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009380 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009381 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009382 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009383 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009384 }
9385
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009386out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009387 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009388out_fput:
9389 fdput(f);
9390 return submitted ? submitted : ret;
9391}
9392
Tobias Klauserbebdb652020-02-26 18:38:32 +01009393#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009394static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9395 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009396{
Jens Axboe87ce9552020-01-30 08:25:34 -07009397 struct user_namespace *uns = seq_user_ns(m);
9398 struct group_info *gi;
9399 kernel_cap_t cap;
9400 unsigned __capi;
9401 int g;
9402
9403 seq_printf(m, "%5d\n", id);
9404 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9405 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9406 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9407 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9408 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9409 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9410 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9411 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9412 seq_puts(m, "\n\tGroups:\t");
9413 gi = cred->group_info;
9414 for (g = 0; g < gi->ngroups; g++) {
9415 seq_put_decimal_ull(m, g ? " " : "",
9416 from_kgid_munged(uns, gi->gid[g]));
9417 }
9418 seq_puts(m, "\n\tCapEff:\t");
9419 cap = cred->cap_effective;
9420 CAP_FOR_EACH_U32(__capi)
9421 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9422 seq_putc(m, '\n');
9423 return 0;
9424}
9425
9426static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9427{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009428 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009429 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009430 int i;
9431
Jens Axboefad8e0d2020-09-28 08:57:48 -06009432 /*
9433 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9434	 * since the fdinfo case grabs it in the opposite order from normal use
9435	 * cases. If we fail to get the lock, we just don't iterate any
9436 * structures that could be going away outside the io_uring mutex.
9437 */
9438 has_lock = mutex_trylock(&ctx->uring_lock);
9439
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009440 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009441 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009442 if (!sq->thread)
9443 sq = NULL;
9444 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009445
9446 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9447 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009448 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009449 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07009450 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009451
Jens Axboe87ce9552020-01-30 08:25:34 -07009452 if (f)
9453 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9454 else
9455 seq_printf(m, "%5u: <none>\n", i);
9456 }
9457 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009458 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009459 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +01009460 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -07009461
Pavel Begunkov4751f532021-04-01 15:43:55 +01009462 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -07009463 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009464 if (has_lock && !xa_empty(&ctx->personalities)) {
9465 unsigned long index;
9466 const struct cred *cred;
9467
Jens Axboe87ce9552020-01-30 08:25:34 -07009468 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009469 xa_for_each(&ctx->personalities, index, cred)
9470 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009471 }
Jens Axboed7718a92020-02-14 22:23:12 -07009472 seq_printf(m, "PollList:\n");
9473 spin_lock_irq(&ctx->completion_lock);
9474 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9475 struct hlist_head *list = &ctx->cancel_hash[i];
9476 struct io_kiocb *req;
9477
9478 hlist_for_each_entry(req, list, hash_node)
9479 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9480 req->task->task_works != NULL);
9481 }
9482 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009483 if (has_lock)
9484 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009485}
9486
9487static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9488{
9489 struct io_ring_ctx *ctx = f->private_data;
9490
9491 if (percpu_ref_tryget(&ctx->refs)) {
9492 __io_uring_show_fdinfo(ctx, m);
9493 percpu_ref_put(&ctx->refs);
9494 }
9495}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009496#endif
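/*
 * Reading /proc/<pid>/fdinfo/<ring fd> therefore yields output of roughly
 * the following shape; the values below are illustrative only, derived
 * directly from the seq_printf() format strings above:
 *
 *	SqThread:	-1
 *	SqThreadCpu:	-1
 *	UserFiles:	2
 *	    0: file-a
 *	    1: <none>
 *	UserBufs:	1
 *	    0: 0x7f2b3c000000/4096
 *	Personalities:
 *	PollList:
 *	 op=6, task_works=0
 */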
Jens Axboe87ce9552020-01-30 08:25:34 -07009497
Jens Axboe2b188cc2019-01-07 10:46:33 -07009498static const struct file_operations io_uring_fops = {
9499 .release = io_uring_release,
9500 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009501#ifndef CONFIG_MMU
9502 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9503 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9504#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009505 .poll = io_uring_poll,
9506 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009507#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009508 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009509#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009510};
9511
9512static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9513 struct io_uring_params *p)
9514{
Hristo Venev75b28af2019-08-26 17:23:46 +00009515 struct io_rings *rings;
9516 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009517
Jens Axboebd740482020-08-05 12:58:23 -06009518 /* make sure these are sane, as we already accounted them */
9519 ctx->sq_entries = p->sq_entries;
9520 ctx->cq_entries = p->cq_entries;
9521
Hristo Venev75b28af2019-08-26 17:23:46 +00009522 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9523 if (size == SIZE_MAX)
9524 return -EOVERFLOW;
9525
9526 rings = io_mem_alloc(size);
9527 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009528 return -ENOMEM;
9529
Hristo Venev75b28af2019-08-26 17:23:46 +00009530 ctx->rings = rings;
9531 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9532 rings->sq_ring_mask = p->sq_entries - 1;
9533 rings->cq_ring_mask = p->cq_entries - 1;
9534 rings->sq_ring_entries = p->sq_entries;
9535 rings->cq_ring_entries = p->cq_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009536
9537 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009538 if (size == SIZE_MAX) {
9539 io_mem_free(ctx->rings);
9540 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009541 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009542 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009543
9544 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009545 if (!ctx->sq_sqes) {
9546 io_mem_free(ctx->rings);
9547 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009548 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009549 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009550
Jens Axboe2b188cc2019-01-07 10:46:33 -07009551 return 0;
9552}
9553
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009554static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9555{
9556 int ret, fd;
9557
9558 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9559 if (fd < 0)
9560 return fd;
9561
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009562 ret = io_uring_add_tctx_node(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009563 if (ret) {
9564 put_unused_fd(fd);
9565 return ret;
9566 }
9567 fd_install(fd, file);
9568 return fd;
9569}
9570
Jens Axboe2b188cc2019-01-07 10:46:33 -07009571/*
9572 * Allocate an anonymous fd; this is what constitutes the application-
9573 * visible backing of an io_uring instance. The application mmaps this
9574 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9575 * we have to tie this fd to a socket for file garbage collection purposes.
9576 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009577static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009578{
9579 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009580#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009581 int ret;
9582
Jens Axboe2b188cc2019-01-07 10:46:33 -07009583 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9584 &ctx->ring_sock);
9585 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009586 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009587#endif
9588
Jens Axboe2b188cc2019-01-07 10:46:33 -07009589 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9590 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009591#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009592 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009593 sock_release(ctx->ring_sock);
9594 ctx->ring_sock = NULL;
9595 } else {
9596 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009597 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009598#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009599 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009600}
9601
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009602static int io_uring_create(unsigned entries, struct io_uring_params *p,
9603 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009604{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009605 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009606 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009607 int ret;
9608
Jens Axboe8110c1a2019-12-28 15:39:54 -07009609 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009610 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009611 if (entries > IORING_MAX_ENTRIES) {
9612 if (!(p->flags & IORING_SETUP_CLAMP))
9613 return -EINVAL;
9614 entries = IORING_MAX_ENTRIES;
9615 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009616
9617 /*
9618 * Use twice as many entries for the CQ ring. It's possible for the
9619 * application to drive a higher depth than the size of the SQ ring,
9620 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009621 * some flexibility in overcommitting a bit. If the application has
9622 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9623 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009624 */
9625 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009626 if (p->flags & IORING_SETUP_CQSIZE) {
9627 /*
9628 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9629 * to a power-of-two, if it isn't already. We do NOT impose
9630 * any cq vs sq ring sizing.
9631 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009632 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009633 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009634 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9635 if (!(p->flags & IORING_SETUP_CLAMP))
9636 return -EINVAL;
9637 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9638 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009639 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9640 if (p->cq_entries < p->sq_entries)
9641 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009642 } else {
9643 p->cq_entries = 2 * p->sq_entries;
9644 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009645
Jens Axboe2b188cc2019-01-07 10:46:33 -07009646 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009647 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009648 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009649 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009650 if (!capable(CAP_IPC_LOCK))
9651 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009652
9653 /*
9654 * This is just grabbed for accounting purposes. When a process exits,
9655 * the mm is exited and dropped before the files, hence we need to hang
9656 * on to this mm purely for the purposes of being able to unaccount
9657 * memory (locked/pinned vm). It's not used for anything else.
9658 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009659 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009660 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009661
Jens Axboe2b188cc2019-01-07 10:46:33 -07009662 ret = io_allocate_scq_urings(ctx, p);
9663 if (ret)
9664 goto err;
9665
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009666 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009667 if (ret)
9668 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009669 /* always set a rsrc node */
Pavel Begunkov47b228c2021-04-29 11:46:48 +01009670 ret = io_rsrc_node_switch_start(ctx);
9671 if (ret)
9672 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009673 io_rsrc_node_switch(ctx, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009674
Jens Axboe2b188cc2019-01-07 10:46:33 -07009675 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009676 p->sq_off.head = offsetof(struct io_rings, sq.head);
9677 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9678 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9679 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9680 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9681 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9682 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009683
9684 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009685 p->cq_off.head = offsetof(struct io_rings, cq.head);
9686 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9687 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9688 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9689 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9690 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009691 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009692
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009693 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9694 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009695 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009696 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Pavel Begunkov96905572021-06-10 16:37:38 +01009697 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
9698 IORING_FEAT_RSRC_TAGS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009699
9700 if (copy_to_user(params, p, sizeof(*p))) {
9701 ret = -EFAULT;
9702 goto err;
9703 }
Jens Axboed1719f72020-07-30 13:43:53 -06009704
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009705 file = io_uring_get_file(ctx);
9706 if (IS_ERR(file)) {
9707 ret = PTR_ERR(file);
9708 goto err;
9709 }
9710
Jens Axboed1719f72020-07-30 13:43:53 -06009711 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009712 * Install ring fd as the very last thing, so we don't risk someone
9713 * having closed it before we finish setup
9714 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009715 ret = io_uring_install_fd(ctx, file);
9716 if (ret < 0) {
9717 /* fput will clean it up */
9718 fput(file);
9719 return ret;
9720 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009721
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009722 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009723 return ret;
9724err:
9725 io_ring_ctx_wait_and_kill(ctx);
9726 return ret;
9727}
9728
9729/*
9730 * Sets up an io_uring context, and returns the fd. The application asks for a
9731 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9732 * params structure passed in.
9733 */
9734static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9735{
9736 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009737 int i;
9738
9739 if (copy_from_user(&p, params, sizeof(p)))
9740 return -EFAULT;
9741 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9742 if (p.resv[i])
9743 return -EINVAL;
9744 }
9745
Jens Axboe6c271ce2019-01-10 11:22:30 -07009746 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009747 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009748 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9749 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009750 return -EINVAL;
9751
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009752 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009753}
9754
9755SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9756 struct io_uring_params __user *, params)
9757{
9758 return io_uring_setup(entries, params);
9759}
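/*
 * Illustrative userspace sketch (not part of this file) of how the sq_off,
 * cq_off and feature values filled in by io_uring_create() are consumed.
 * The IORING_OFF_* mmap offsets and feature flags are assumed from
 * include/uapi/linux/io_uring.h; liburing's io_uring_queue_init() performs
 * the equivalent steps:
 *
 *	struct io_uring_params p = { 0 };
 *	int fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 *	size_t sq_bytes = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_bytes = p.cq_off.cqes +
 *			  p.cq_entries * sizeof(struct io_uring_cqe);
 *	if (p.features & IORING_FEAT_SINGLE_MMAP) {
 *		if (cq_bytes > sq_bytes)
 *			sq_bytes = cq_bytes;
 *		cq_bytes = sq_bytes;
 *	}
 *
 *	char *sq_ring = mmap(NULL, sq_bytes, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, fd,
 *			     IORING_OFF_SQ_RING);
 *	char *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  fd, IORING_OFF_SQES);
 *
 *	unsigned *sq_tail  = (unsigned *)(sq_ring + p.sq_off.tail);
 *	unsigned *sq_array = (unsigned *)(sq_ring + p.sq_off.array);
 */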
9760
Jens Axboe66f4af92020-01-16 15:36:52 -07009761static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9762{
9763 struct io_uring_probe *p;
9764 size_t size;
9765 int i, ret;
9766
9767 size = struct_size(p, ops, nr_args);
9768 if (size == SIZE_MAX)
9769 return -EOVERFLOW;
9770 p = kzalloc(size, GFP_KERNEL);
9771 if (!p)
9772 return -ENOMEM;
9773
9774 ret = -EFAULT;
9775 if (copy_from_user(p, arg, size))
9776 goto out;
9777 ret = -EINVAL;
9778 if (memchr_inv(p, 0, size))
9779 goto out;
9780
9781 p->last_op = IORING_OP_LAST - 1;
9782 if (nr_args > IORING_OP_LAST)
9783 nr_args = IORING_OP_LAST;
9784
9785 for (i = 0; i < nr_args; i++) {
9786 p->ops[i].op = i;
9787 if (!io_op_defs[i].not_supported)
9788 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9789 }
9790 p->ops_len = i;
9791
9792 ret = 0;
9793 if (copy_to_user(arg, p, size))
9794 ret = -EFAULT;
9795out:
9796 kfree(p);
9797 return ret;
9798}
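/*
 * Illustrative userspace sketch of IORING_REGISTER_PROBE, with the
 * io_uring_probe/io_uring_probe_op layout assumed from
 * include/uapi/linux/io_uring.h; the kernel fills one entry per opcode and
 * flags the ones it supports:
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	ret = syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		      probe, IORING_OP_LAST);
 *	if (!ret && IORING_OP_OPENAT2 < probe->ops_len &&
 *	    (probe->ops[IORING_OP_OPENAT2].flags & IO_URING_OP_SUPPORTED))
 *		openat2_supported = true;
 */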
9799
Jens Axboe071698e2020-01-28 10:04:42 -07009800static int io_register_personality(struct io_ring_ctx *ctx)
9801{
Jens Axboe4379bf82021-02-15 13:40:22 -07009802 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009803 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009804 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009805
Jens Axboe4379bf82021-02-15 13:40:22 -07009806 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009807
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009808 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9809 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9810 if (!ret)
9811 return id;
9812 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009813 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009814}
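/*
 * A registered personality is consumed by setting sqe->personality to the
 * id returned here, so that request is issued with the registered creds
 * rather than those of the submitting task. Minimal userspace sketch using
 * raw syscalls (uapi names assumed):
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	sqe->personality = id;
 */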
9815
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009816static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9817 unsigned int nr_args)
9818{
9819 struct io_uring_restriction *res;
9820 size_t size;
9821 int i, ret;
9822
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009823 /* Restrictions allowed only if rings started disabled */
9824 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9825 return -EBADFD;
9826
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009827 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009828 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009829 return -EBUSY;
9830
9831 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9832 return -EINVAL;
9833
9834 size = array_size(nr_args, sizeof(*res));
9835 if (size == SIZE_MAX)
9836 return -EOVERFLOW;
9837
9838 res = memdup_user(arg, size);
9839 if (IS_ERR(res))
9840 return PTR_ERR(res);
9841
9842 ret = 0;
9843
9844 for (i = 0; i < nr_args; i++) {
9845 switch (res[i].opcode) {
9846 case IORING_RESTRICTION_REGISTER_OP:
9847 if (res[i].register_op >= IORING_REGISTER_LAST) {
9848 ret = -EINVAL;
9849 goto out;
9850 }
9851
9852 __set_bit(res[i].register_op,
9853 ctx->restrictions.register_op);
9854 break;
9855 case IORING_RESTRICTION_SQE_OP:
9856 if (res[i].sqe_op >= IORING_OP_LAST) {
9857 ret = -EINVAL;
9858 goto out;
9859 }
9860
9861 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9862 break;
9863 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9864 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9865 break;
9866 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9867 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9868 break;
9869 default:
9870 ret = -EINVAL;
9871 goto out;
9872 }
9873 }
9874
9875out:
9876 /* Reset all restrictions if an error happened */
9877 if (ret != 0)
9878 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9879 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009880 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009881
9882 kfree(res);
9883 return ret;
9884}
9885
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009886static int io_register_enable_rings(struct io_ring_ctx *ctx)
9887{
9888 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9889 return -EBADFD;
9890
9891 if (ctx->restrictions.registered)
9892 ctx->restricted = 1;
9893
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009894 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9895 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9896 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009897 return 0;
9898}
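/*
 * Restrictions only apply to rings created with IORING_SETUP_R_DISABLED:
 * the ring starts disabled, the (more privileged) creator registers what a
 * less trusted user may do, and then enables the ring. A minimal sketch
 * using the uapi structures assumed from include/uapi/linux/io_uring.h:
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED,
 *		  .sqe_flags = IOSQE_FIXED_FILE },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */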
9899
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009900static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009901 struct io_uring_rsrc_update2 *up,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009902 unsigned nr_args)
9903{
9904 __u32 tmp;
9905 int err;
9906
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009907 if (up->resv)
9908 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009909 if (check_add_overflow(up->offset, nr_args, &tmp))
9910 return -EOVERFLOW;
9911 err = io_rsrc_node_switch_start(ctx);
9912 if (err)
9913 return err;
9914
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009915 switch (type) {
9916 case IORING_RSRC_FILE:
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009917 return __io_sqe_files_update(ctx, up, nr_args);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009918 case IORING_RSRC_BUFFER:
9919 return __io_sqe_buffers_update(ctx, up, nr_args);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009920 }
9921 return -EINVAL;
9922}
9923
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009924static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
9925 unsigned nr_args)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009926{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009927 struct io_uring_rsrc_update2 up;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009928
9929 if (!nr_args)
9930 return -EINVAL;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009931 memset(&up, 0, sizeof(up));
9932 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
9933 return -EFAULT;
9934 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
9935}
9936
9937static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009938 unsigned size, unsigned type)
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009939{
9940 struct io_uring_rsrc_update2 up;
9941
9942 if (size != sizeof(up))
9943 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009944 if (copy_from_user(&up, arg, sizeof(up)))
9945 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +01009946 if (!up.nr || up.resv)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009947 return -EINVAL;
Pavel Begunkov992da012021-06-10 16:37:37 +01009948 return __io_register_rsrc_update(ctx, type, &up, up.nr);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009949}
9950
Pavel Begunkov792e3582021-04-25 14:32:21 +01009951static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009952 unsigned int size, unsigned int type)
Pavel Begunkov792e3582021-04-25 14:32:21 +01009953{
9954 struct io_uring_rsrc_register rr;
9955
9956 /* keep it extendible */
9957 if (size != sizeof(rr))
9958 return -EINVAL;
9959
9960 memset(&rr, 0, sizeof(rr));
9961 if (copy_from_user(&rr, arg, size))
9962 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +01009963 if (!rr.nr || rr.resv || rr.resv2)
Pavel Begunkov792e3582021-04-25 14:32:21 +01009964 return -EINVAL;
9965
Pavel Begunkov992da012021-06-10 16:37:37 +01009966 switch (type) {
Pavel Begunkov792e3582021-04-25 14:32:21 +01009967 case IORING_RSRC_FILE:
9968 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
9969 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009970 case IORING_RSRC_BUFFER:
9971 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
9972 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov792e3582021-04-25 14:32:21 +01009973 }
9974 return -EINVAL;
9975}
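/*
 * For the v2 resource-registration opcodes, the nr_args value passed to
 * io_uring_register() is the size of the argument structure (see the size
 * checks above), which keeps the ABI extendible. A rough userspace sketch
 * of registering tagged files via IORING_REGISTER_FILES2, with the
 * io_uring_rsrc_register layout assumed from include/uapi/linux/io_uring.h:
 *
 *	__s32 fds[2] = { fd_a, fd_b };
 *	__u64 tags[2] = { 1, 2 };
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 2,
 *		.data	= (unsigned long) fds,
 *		.tags	= (unsigned long) tags,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 */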
9976
Jens Axboe071698e2020-01-28 10:04:42 -07009977static bool io_register_op_must_quiesce(int op)
9978{
9979 switch (op) {
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009980 case IORING_REGISTER_BUFFERS:
9981 case IORING_UNREGISTER_BUFFERS:
Pavel Begunkovf4f7d212021-04-01 15:44:02 +01009982 case IORING_REGISTER_FILES:
Jens Axboe071698e2020-01-28 10:04:42 -07009983 case IORING_UNREGISTER_FILES:
9984 case IORING_REGISTER_FILES_UPDATE:
9985 case IORING_REGISTER_PROBE:
9986 case IORING_REGISTER_PERSONALITY:
9987 case IORING_UNREGISTER_PERSONALITY:
Pavel Begunkov992da012021-06-10 16:37:37 +01009988 case IORING_REGISTER_FILES2:
9989 case IORING_REGISTER_FILES_UPDATE2:
9990 case IORING_REGISTER_BUFFERS2:
9991 case IORING_REGISTER_BUFFERS_UPDATE:
Jens Axboe071698e2020-01-28 10:04:42 -07009992 return false;
9993 default:
9994 return true;
9995 }
9996}
9997
Jens Axboeedafcce2019-01-09 09:16:05 -07009998static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9999 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -060010000 __releases(ctx->uring_lock)
10001 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -070010002{
10003 int ret;
10004
Jens Axboe35fa71a2019-04-22 10:23:23 -060010005 /*
10006	 * We're inside the ring mutex; if the ref is already dying, then
10007 * someone else killed the ctx or is already going through
10008 * io_uring_register().
10009 */
10010 if (percpu_ref_is_dying(&ctx->refs))
10011 return -ENXIO;
10012
Pavel Begunkov75c40212021-04-15 13:07:40 +010010013 if (ctx->restricted) {
10014 if (opcode >= IORING_REGISTER_LAST)
10015 return -EINVAL;
10016 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10017 if (!test_bit(opcode, ctx->restrictions.register_op))
10018 return -EACCES;
10019 }
10020
Jens Axboe071698e2020-01-28 10:04:42 -070010021 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010022 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -060010023
Jens Axboe05f3fb32019-12-09 11:22:50 -070010024 /*
10025 * Drop uring mutex before waiting for references to exit. If
10026 * another thread is currently inside io_uring_enter() it might
10027 * need to grab the uring_lock to make progress. If we hold it
10028 * here across the drain wait, then we can deadlock. It's safe
10029 * to drop the mutex here, since no new references will come in
10030 * after we've killed the percpu ref.
10031 */
10032 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010033 do {
10034 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10035 if (!ret)
10036 break;
Jens Axboeed6930c2020-10-08 19:09:46 -060010037 ret = io_run_task_work_sig();
10038 if (ret < 0)
10039 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010040 } while (1);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010041 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010042
Jens Axboec1503682020-01-08 08:26:07 -070010043 if (ret) {
Pavel Begunkovf70865d2021-04-11 01:46:40 +010010044 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10045 return ret;
Jens Axboec1503682020-01-08 08:26:07 -070010046 }
Jens Axboe05f3fb32019-12-09 11:22:50 -070010047 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010048
10049 switch (opcode) {
10050 case IORING_REGISTER_BUFFERS:
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010051 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -070010052 break;
10053 case IORING_UNREGISTER_BUFFERS:
10054 ret = -EINVAL;
10055 if (arg || nr_args)
10056 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010057 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -070010058 break;
Jens Axboe6b063142019-01-10 22:13:58 -070010059 case IORING_REGISTER_FILES:
Pavel Begunkov792e3582021-04-25 14:32:21 +010010060 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
Jens Axboe6b063142019-01-10 22:13:58 -070010061 break;
10062 case IORING_UNREGISTER_FILES:
10063 ret = -EINVAL;
10064 if (arg || nr_args)
10065 break;
10066 ret = io_sqe_files_unregister(ctx);
10067 break;
Jens Axboec3a31e62019-10-03 13:59:56 -060010068 case IORING_REGISTER_FILES_UPDATE:
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010069 ret = io_register_files_update(ctx, arg, nr_args);
Jens Axboec3a31e62019-10-03 13:59:56 -060010070 break;
Jens Axboe9b402842019-04-11 11:45:41 -060010071 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -070010072 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -060010073 ret = -EINVAL;
10074 if (nr_args != 1)
10075 break;
10076 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -070010077 if (ret)
10078 break;
10079 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10080 ctx->eventfd_async = 1;
10081 else
10082 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -060010083 break;
10084 case IORING_UNREGISTER_EVENTFD:
10085 ret = -EINVAL;
10086 if (arg || nr_args)
10087 break;
10088 ret = io_eventfd_unregister(ctx);
10089 break;
Jens Axboe66f4af92020-01-16 15:36:52 -070010090 case IORING_REGISTER_PROBE:
10091 ret = -EINVAL;
10092 if (!arg || nr_args > 256)
10093 break;
10094 ret = io_probe(ctx, arg, nr_args);
10095 break;
Jens Axboe071698e2020-01-28 10:04:42 -070010096 case IORING_REGISTER_PERSONALITY:
10097 ret = -EINVAL;
10098 if (arg || nr_args)
10099 break;
10100 ret = io_register_personality(ctx);
10101 break;
10102 case IORING_UNREGISTER_PERSONALITY:
10103 ret = -EINVAL;
10104 if (arg)
10105 break;
10106 ret = io_unregister_personality(ctx, nr_args);
10107 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010108 case IORING_REGISTER_ENABLE_RINGS:
10109 ret = -EINVAL;
10110 if (arg || nr_args)
10111 break;
10112 ret = io_register_enable_rings(ctx);
10113 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010114 case IORING_REGISTER_RESTRICTIONS:
10115 ret = io_register_restrictions(ctx, arg, nr_args);
10116 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010117 case IORING_REGISTER_FILES2:
10118 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
Pavel Begunkov792e3582021-04-25 14:32:21 +010010119 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010120 case IORING_REGISTER_FILES_UPDATE2:
10121 ret = io_register_rsrc_update(ctx, arg, nr_args,
10122 IORING_RSRC_FILE);
10123 break;
10124 case IORING_REGISTER_BUFFERS2:
10125 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
10126 break;
10127 case IORING_REGISTER_BUFFERS_UPDATE:
10128 ret = io_register_rsrc_update(ctx, arg, nr_args,
10129 IORING_RSRC_BUFFER);
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010130 break;
Jens Axboeedafcce2019-01-09 09:16:05 -070010131 default:
10132 ret = -EINVAL;
10133 break;
10134 }
10135
Jens Axboe071698e2020-01-28 10:04:42 -070010136 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010137 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -070010138 percpu_ref_reinit(&ctx->refs);
Jens Axboe0f158b42020-05-14 17:18:39 -060010139 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010140 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010141 return ret;
10142}
10143
10144SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10145 void __user *, arg, unsigned int, nr_args)
10146{
10147 struct io_ring_ctx *ctx;
10148 long ret = -EBADF;
10149 struct fd f;
10150
10151 f = fdget(fd);
10152 if (!f.file)
10153 return -EBADF;
10154
10155 ret = -EOPNOTSUPP;
10156 if (f.file->f_op != &io_uring_fops)
10157 goto out_fput;
10158
10159 ctx = f.file->private_data;
10160
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +000010161 io_run_task_work();
10162
Jens Axboeedafcce2019-01-09 09:16:05 -070010163 mutex_lock(&ctx->uring_lock);
10164 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10165 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010166 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10167 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010168out_fput:
10169 fdput(f);
10170 return ret;
10171}
10172
Jens Axboe2b188cc2019-01-07 10:46:33 -070010173static int __init io_uring_init(void)
10174{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010175#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10176 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10177 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10178} while (0)
10179
10180#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10181 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10182 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10183 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10184 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10185 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10186 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10187 BUILD_BUG_SQE_ELEM(8, __u64, off);
10188 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10189 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010190 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010191 BUILD_BUG_SQE_ELEM(24, __u32, len);
10192 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10193 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10194 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10195 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010196 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10197 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010198 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10199 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10200 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10201 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10202 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10203 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10204 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10205 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010206 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010207 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10208 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
10209 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010210 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010211
Pavel Begunkovb0d658ec2021-04-27 16:13:53 +010010212 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10213 sizeof(struct io_uring_rsrc_update));
10214 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10215 sizeof(struct io_uring_rsrc_update2));
10216 /* should fit into one byte */
10217 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10218
Jens Axboed3656342019-12-18 09:50:26 -070010219 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -070010220 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Jens Axboe91f245d2021-02-09 13:48:50 -070010221 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10222 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010223 return 0;
10224};
10225__initcall(io_uring_init);