// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed in
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
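
/*
 * Illustrative application-side sketch of the CQ reaping protocol described
 * above (not part of the kernel build). The cq_khead/cq_ktail/cq_ring_mask/
 * cqes names are assumptions mirroring liburing's mmap'ed ring pointers, not
 * definitions from this file:
 *
 *	unsigned head = *cq_khead;
 *	unsigned tail = smp_load_acquire(cq_ktail);   <- pairs with the kernel's tail store
 *	while (head != tail)
 *		consume(&cqes[head++ & cq_ring_mask]);
 *	smp_store_release(cq_khead, head);            <- orders the CQE loads before the head store
 */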

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* 512 entries per page on 64-bit archs, 64 pages max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_IO_LINK | \
			 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
			 IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the cq
	 * ring, and the application controls the tail of the sq ring and the
	 * head of the cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32 sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are never more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
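
/*
 * Example of the masking rule above: with cq_ring_entries == 256 the mask is
 * 255, and the CQE for a given position is cqes[head & cq_ring_mask]. head
 * and tail are free-running u32 counters that are only ever masked for
 * indexing, never wrapped explicitly.
 */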

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64 ubuf;
	u64 ubuf_end;
	unsigned int nr_bvecs;
	unsigned long acct_pages;
	struct bio_vec bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref refs;
	struct list_head node;
	struct list_head rsrc_list;
	struct io_rsrc_data *rsrc_data;
	struct llist_node llist;
	bool done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx *ctx;

	u64 **tags;
	unsigned int nr;
	rsrc_put_fn *do_put;
	atomic_t refs;
	struct completion done;
	bool quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t refs;
	atomic_t park_pending;
	struct mutex lock;

	/* ctx's that are using this sqd */
	struct list_head ctx_list;

	struct task_struct *thread;
	struct wait_queue_head wait;

	unsigned sq_thread_idle;
	int sq_cpu;
	pid_t task_pid;
	pid_t task_tgid;

	unsigned long state;
	struct completion exited;
};

#define IO_COMPL_BATCH		32
#define IO_REQ_CACHE_SIZE	32
#define IO_REQ_ALLOC_BATCH	8

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	struct blk_plug plug;
	struct io_submit_link link;

	/*
	 * io_kiocb alloc cache
	 */
	void *reqs[IO_REQ_CACHE_SIZE];
	unsigned int free_reqs;

	bool plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_kiocb *compl_reqs[IO_COMPL_BATCH];
	unsigned int compl_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head free_list;

	unsigned int ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref refs;

		struct io_rings *rings;
		unsigned int flags;
		unsigned int compat: 1;
		unsigned int drain_next: 1;
		unsigned int eventfd_async: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;
		struct list_head defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		struct io_file_table file_table;
		unsigned nr_user_files;
		unsigned nr_user_bufs;
		struct io_mapped_ubuf **user_bufs;

		struct io_submit_state submit_state;
		struct list_head timeout_list;
		struct list_head cq_overflow_list;
		struct xarray io_buffers;
		struct xarray personalities;
		u32 pers_next;
		unsigned sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head locked_free_list;
	unsigned int locked_free_nr;

	const struct cred *sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned long check_cq_overflow;

	struct {
		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct eventfd_ctx *cq_ev_fd;
		struct wait_queue_head poll_wait;
		struct wait_queue_head cq_wait;
		unsigned cq_extra;
		atomic_t cq_timeouts;
		struct fasync_struct *cq_fasync;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		spinlock_t timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head iopoll_list;
		struct hlist_head *cancel_hash;
		unsigned cancel_hash_bits;
		bool poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node *rsrc_backup_node;
		struct io_mapped_ubuf *dummy_ubuf;
		struct io_rsrc_data *file_data;
		struct io_rsrc_data *buf_data;

		struct delayed_work rsrc_put_work;
		struct llist_head rsrc_put_llist;
		struct list_head rsrc_ref_list;
		spinlock_t rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket *ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash *hash_map;

		/* Only used for accounting purposes */
		struct user_struct *user;
		struct mm_struct *mm_account;

		/* ctx exit and cancelation */
		struct llist_head fallback_llist;
		struct delayed_work fallback_work;
		struct work_struct exit_work;
		struct list_head tctx_list;
		struct completion ref_comp;
	};
};

struct io_uring_task {
	/* submission side */
	int cached_refs;
	struct xarray xa;
	struct wait_queue_head wait;
	const struct io_ring_ctx *last;
	struct io_wq *io_wq;
	struct percpu_counter inflight;
	atomic_t inflight_tracked;
	atomic_t in_idle;

	spinlock_t task_lock;
	struct io_wq_work_list task_list;
	struct callback_head task_work;
	bool task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	struct wait_queue_head *head;
	__poll_t events;
	bool done;
	bool canceled;
	struct wait_queue_entry wait;
};

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_close {
	struct file *file;
	int fd;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	unsigned long nofile;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u32 off;
	u32 target_seq;
	struct list_head list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb *head;
	/* for linked completions */
	struct io_kiocb *prev;
};

struct io_timeout_rem {
	struct file *file;
	u64 addr;

	/* timeout update */
	struct timespec64 ts;
	u32 flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_rsrc_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	struct file *file_in;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__u32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_statx {
	struct file *file;
	int dfd;
	unsigned int mask;
	unsigned int flags;
	const char __user *filename;
	struct statx __user *buffer;
};

struct io_shutdown {
	struct file *file;
	int how;
};

struct io_rename {
	struct file *file;
	int old_dfd;
	int new_dfd;
	struct filename *oldpath;
	struct filename *newpath;
	int flags;
};

struct io_unlink {
	struct file *file;
	int dfd;
	int flags;
	struct filename *filename;
};

struct io_completion {
	struct file *file;
	u32 cflags;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec *free_iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	const struct iovec *free_iovec;
	struct iov_iter iter;
	size_t bytes_done;
	struct wait_page_queue wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
};

struct async_poll {
	struct io_poll_iocb poll;
	struct io_poll_iocb *double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req);

struct io_task_work {
	union {
		struct io_wq_work_node node;
		struct llist_node fallback_node;
	};
	io_req_tw_func_t func;
};

enum {
	IORING_RSRC_FILE	= 0,
	IORING_RSRC_BUFFER	= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file *file;
		struct io_rw rw;
		struct io_poll_iocb poll;
		struct io_poll_update poll_update;
		struct io_accept accept;
		struct io_sync sync;
		struct io_cancel cancel;
		struct io_timeout timeout;
		struct io_timeout_rem timeout_rem;
		struct io_connect connect;
		struct io_sr_msg sr_msg;
		struct io_open open;
		struct io_close close;
		struct io_rsrc_update rsrc_update;
		struct io_fadvise fadvise;
		struct io_madvise madvise;
		struct io_epoll epoll;
		struct io_splice splice;
		struct io_provide_buf pbuf;
		struct io_statx statx;
		struct io_shutdown shutdown;
		struct io_rename rename;
		struct io_unlink unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;

	u16 buf_index;
	u32 result;

	struct io_ring_ctx *ctx;
	unsigned int flags;
	atomic_t refs;
	struct task_struct *task;
	u64 user_data;

	struct io_kiocb *link;
	struct percpu_ref *fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head inflight_entry;
	struct io_task_work io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	struct async_poll *apoll;
	struct io_wq_work work;
	const struct cred *creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf *imu;
};

struct io_tctx_node {
	struct list_head ctx_node;
	struct task_struct *task;
	struct io_ring_ctx *ctx;
};

struct io_defer_entry {
	struct list_head list;
	struct io_kiocb *req;
	u32 seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned pollin : 1;
	unsigned pollout : 1;
	/* op supports buffer selection */
	unsigned buffer_select : 1;
	/* do async prep if the request is going to be punted */
	unsigned needs_async_setup : 1;
	/* should block plug */
	unsigned plug : 1;
	/* size of async data needed, if any */
	unsigned short async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};
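
/*
 * Example lookup: io_op_defs[IORING_OP_READV] above marks readv as pollin,
 * buffer_select and needs_async_setup, with async_size sized for io_async_rw;
 * the submission and poll paths index this table by req->opcode to make such
 * per-opcode decisions.
 */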

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static int io_req_prep_async(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
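
/*
 * The check above is true when ->refs is 0 or within 127 of wrapping, so both
 * a dropped-to-zero and an about-to-overflow refcount trip the WARN_ON_ONCE()
 * callers below.
 */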
1089
1090static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1091{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001092 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001093 return atomic_inc_not_zero(&req->refs);
1094}
1095
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001096static inline bool req_ref_put_and_test(struct io_kiocb *req)
1097{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001098 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1099 return true;
1100
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001101 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1102 return atomic_dec_and_test(&req->refs);
1103}
1104
1105static inline void req_ref_put(struct io_kiocb *req)
1106{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001107 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001108 WARN_ON_ONCE(req_ref_put_and_test(req));
1109}
1110
1111static inline void req_ref_get(struct io_kiocb *req)
1112{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001113 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001114 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1115 atomic_inc(&req->refs);
1116}
1117
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001118static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001119{
1120 if (!(req->flags & REQ_F_REFCOUNT)) {
1121 req->flags |= REQ_F_REFCOUNT;
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001122 atomic_set(&req->refs, nr);
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001123 }
1124}
1125
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001126static inline void io_req_set_refcount(struct io_kiocb *req)
1127{
1128 __io_req_set_refcount(req, 1);
1129}
1130
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01001131static inline void io_req_set_rsrc_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001132{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001133 struct io_ring_ctx *ctx = req->ctx;
1134
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001135 if (!req->fixed_rsrc_refs) {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01001136 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001137 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001138 }
1139}
1140
Pavel Begunkovf70865d2021-04-11 01:46:40 +01001141static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1142{
1143 bool got = percpu_ref_tryget(ref);
1144
1145 /* already at zero, wait for ->release() */
1146 if (!got)
1147 wait_for_completion(compl);
1148 percpu_ref_resurrect(ref);
1149 if (got)
1150 percpu_ref_put(ref);
1151}
1152
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001153static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1154 bool cancel_all)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001155{
1156 struct io_kiocb *req;
1157
Pavel Begunkov68207682021-03-22 01:58:25 +00001158 if (task && head->task != task)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001159 return false;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001160 if (cancel_all)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001161 return true;
1162
1163 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001164 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001165 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001166 }
1167 return false;
1168}
1169
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001170static inline void req_set_fail(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001171{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001172 req->flags |= REQ_F_FAIL;
Jens Axboec40f6372020-06-25 15:39:59 -06001173}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001174
Jens Axboe2b188cc2019-01-07 10:46:33 -07001175static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1176{
1177 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1178
Jens Axboe0f158b42020-05-14 17:18:39 -06001179 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001180}
1181
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001182static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1183{
1184 return !req->timeout.off;
1185}
1186
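/*
 * Fallback for task_work that couldn't be queued on the target task
 * (typically because it is exiting): run the requests' callbacks from
 * workqueue context instead.
 */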
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001187static void io_fallback_req_func(struct work_struct *work)
1188{
1189 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1190 fallback_work.work);
1191 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1192 struct io_kiocb *req, *tmp;
1193
1194 percpu_ref_get(&ctx->refs);
1195 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
1196 req->io_task_work.func(req);
1197 percpu_ref_put(&ctx->refs);
1198}
1199
Jens Axboe2b188cc2019-01-07 10:46:33 -07001200static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1201{
1202 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001203 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001204
1205 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1206 if (!ctx)
1207 return NULL;
1208
Jens Axboe78076bb2019-12-04 19:56:40 -07001209 /*
1210 * Use 5 bits less than the max cq entries, that should give us around
1211 * 32 entries per hash list if totally full and uniformly spread.
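	 * E.g. cq_entries == 4096: ilog2() == 12, 12 - 5 == 7 hash bits,
	 * i.e. 128 buckets of ~32 entries each.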
1212 */
1213 hash_bits = ilog2(p->cq_entries);
1214 hash_bits -= 5;
1215 if (hash_bits <= 0)
1216 hash_bits = 1;
1217 ctx->cancel_hash_bits = hash_bits;
1218 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1219 GFP_KERNEL);
1220 if (!ctx->cancel_hash)
1221 goto err;
1222 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1223
Pavel Begunkov62248432021-04-28 13:11:29 +01001224 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1225 if (!ctx->dummy_ubuf)
1226 goto err;
	1227	/* set an invalid range, so io_import_fixed() fails on it */
1228 ctx->dummy_ubuf->ubuf = -1UL;
1229
Roman Gushchin21482892019-05-07 10:01:48 -07001230 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001231 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1232 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001233
1234 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001235 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001236 INIT_LIST_HEAD(&ctx->sqd_list);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001237 init_waitqueue_head(&ctx->poll_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001238 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001239 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001240 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001241 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001242 mutex_init(&ctx->uring_lock);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001243 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001244 spin_lock_init(&ctx->completion_lock);
Jens Axboe89850fc2021-08-10 15:11:51 -06001245 spin_lock_init(&ctx->timeout_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001246 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001247 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001248 INIT_LIST_HEAD(&ctx->timeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001249 spin_lock_init(&ctx->rsrc_ref_lock);
1250 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001251 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1252 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001253 INIT_LIST_HEAD(&ctx->tctx_list);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001254 INIT_LIST_HEAD(&ctx->submit_state.free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001255 INIT_LIST_HEAD(&ctx->locked_free_list);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01001256 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001257 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001258err:
Pavel Begunkov62248432021-04-28 13:11:29 +01001259 kfree(ctx->dummy_ubuf);
Jens Axboe78076bb2019-12-04 19:56:40 -07001260 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001261 kfree(ctx);
1262 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001263}
1264
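/*
 * Account a CQE that is being dropped: bump the user-visible cq_overflow
 * counter, and take it back out of cq_extra so the drain sequence check
 * in req_need_defer() stays consistent.
 */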
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001265static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1266{
1267 struct io_rings *r = ctx->rings;
1268
1269 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1270 ctx->cq_extra--;
1271}
1272
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001273static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001274{
Jens Axboe2bc99302020-07-09 09:43:27 -06001275 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1276 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001277
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001278 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001279 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001280
Bob Liu9d858b22019-11-13 18:06:25 +08001281 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001282}
1283
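/*
 * Per-file flags stuffed into the spare low bits of the fixed-file table
 * entries (pointer alignment keeps those bits free), e.g.:
 *
 *	file_ptr = (unsigned long)file | FFS_ASYNC_READ;
 *	file = (struct file *)(file_ptr & FFS_MASK);
 */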
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01001284#define FFS_ASYNC_READ 0x1UL
1285#define FFS_ASYNC_WRITE 0x2UL
1286#ifdef CONFIG_64BIT
1287#define FFS_ISREG 0x4UL
1288#else
1289#define FFS_ISREG 0x0UL
1290#endif
1291#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
1292
1293static inline bool io_req_ffs_set(struct io_kiocb *req)
1294{
1295 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1296}
1297
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001298static void io_req_track_inflight(struct io_kiocb *req)
1299{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001300 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001301 req->flags |= REQ_F_INFLIGHT;
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001302 atomic_inc(&current->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001303 }
1304}
1305
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001306static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1307{
1308 struct io_kiocb *nxt = req->link;
1309
1310 if (req->flags & REQ_F_LINK_TIMEOUT)
1311 return NULL;
1312
1313 /* linked timeouts should have two refs once prep'ed */
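	/* (one ref for the timer/disarm side, one for normal completion) */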
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001314 io_req_set_refcount(req);
Pavel Begunkovfb682092021-08-15 10:40:20 +01001315 __io_req_set_refcount(nxt, 2);
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001316
1317 nxt->timeout.head = req;
1318 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
1319 req->flags |= REQ_F_LINK_TIMEOUT;
1320 return nxt;
1321}
1322
1323static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1324{
1325 if (likely(!req->link || req->link->opcode != IORING_OP_LINK_TIMEOUT))
1326 return NULL;
1327 return __io_prep_linked_timeout(req);
1328}
1329
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001330static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001331{
Jens Axboed3656342019-12-18 09:50:26 -07001332 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001333 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001334
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001335 if (!(req->flags & REQ_F_CREDS)) {
1336 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001337 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001338 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001339
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001340 req->work.list.next = NULL;
1341 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001342 if (req->flags & REQ_F_FORCE_ASYNC)
1343 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1344
Jens Axboed3656342019-12-18 09:50:26 -07001345 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001346 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001347 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001348 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001349 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001350 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001351 }
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001352
1353 switch (req->opcode) {
1354 case IORING_OP_SPLICE:
1355 case IORING_OP_TEE:
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001356 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1357 req->work.flags |= IO_WQ_WORK_UNBOUND;
1358 break;
1359 }
Jens Axboe561fb042019-10-24 07:25:42 -06001360}
1361
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001362static void io_prep_async_link(struct io_kiocb *req)
1363{
1364 struct io_kiocb *cur;
1365
Pavel Begunkov44eff402021-07-26 14:14:31 +01001366 if (req->flags & REQ_F_LINK_TIMEOUT) {
1367 struct io_ring_ctx *ctx = req->ctx;
1368
Jens Axboe79ebeae2021-08-10 15:18:27 -06001369 spin_lock(&ctx->completion_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001370 io_for_each_link(cur, req)
1371 io_prep_async_work(cur);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001372 spin_unlock(&ctx->completion_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001373 } else {
1374 io_for_each_link(cur, req)
1375 io_prep_async_work(cur);
1376 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001377}
1378
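/*
 * Punt @req to the io-wq thread pool for blocking execution, arming its
 * linked timeout (if any) once the work has been queued.
 */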
Pavel Begunkovebf93662021-03-01 18:20:47 +00001379static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001380{
Jackie Liua197f662019-11-08 08:09:12 -07001381 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001382 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001383 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001384
Jens Axboe3bfe6102021-02-16 14:15:30 -07001385 BUG_ON(!tctx);
1386 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001387
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001388 /* init ->work of the whole link before punting */
1389 io_prep_async_link(req);
Jens Axboe991468d2021-07-23 11:53:54 -06001390
1391 /*
1392 * Not expected to happen, but if we do have a bug where this _can_
1393 * happen, catch it here and ensure the request is marked as
1394 * canceled. That will make io-wq go through the usual work cancel
1395 * procedure rather than attempt to run this request (or create a new
1396 * worker for it).
1397 */
1398 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1399 req->work.flags |= IO_WQ_WORK_CANCEL;
1400
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001401 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1402 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001403 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001404 if (link)
1405 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001406}
1407
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001408static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001409 __must_hold(&req->ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06001410 __must_hold(&req->ctx->timeout_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001411{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001412 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001413
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001414 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001415 atomic_set(&req->ctx->cq_timeouts,
1416 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001417 list_del_init(&req->timeout.list);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001418 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001419 io_put_req_deferred(req);
Jens Axboe5262f562019-09-17 12:26:57 -06001420 }
1421}
1422
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001423static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001424{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001425 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001426 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1427 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001428
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001429 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001430 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001431 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001432 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001433 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001434 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001435}
1436
Pavel Begunkov360428f2020-05-30 14:54:17 +03001437static void io_flush_timeouts(struct io_ring_ctx *ctx)
Jens Axboe89850fc2021-08-10 15:11:51 -06001438 __must_hold(&ctx->completion_lock)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001439{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001440 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001441
Jens Axboe79ebeae2021-08-10 15:18:27 -06001442 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001443 while (!list_empty(&ctx->timeout_list)) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001444 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001445 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001446 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001447
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001448 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001449 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001450
1451 /*
1452 * Since seq can easily wrap around over time, subtract
1453 * the last seq at which timeouts were flushed before comparing.
1454 * Assuming not more than 2^31-1 events have happened since,
1455 * these subtractions won't have wrapped, so we can check if
1456 * target is in [last_seq, current_seq] by comparing the two.
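		 *
		 * E.g. with cq_last_tm_flush == 0xfffffff0, target_seq == 0x10
		 * and seq == 0x20: events_needed == 0x20, events_got == 0x30,
		 * so the timeout is due despite both u32 counters having
		 * wrapped.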
1457 */
1458 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1459 events_got = seq - ctx->cq_last_tm_flush;
1460 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001461 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001462
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001463 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001464 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001465 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001466 ctx->cq_last_tm_flush = seq;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001467 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001468}
1469
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001470static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001471{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001472 if (ctx->off_timeout_used)
1473 io_flush_timeouts(ctx);
1474 if (ctx->drain_active)
1475 io_queue_deferred(ctx);
1476}
1477
1478static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1479{
1480 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1481 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001482 /* order cqe stores with ring update */
1483 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001484}
1485
Jens Axboe90554202020-09-03 12:12:41 -06001486static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1487{
1488 struct io_rings *r = ctx->rings;
1489
Pavel Begunkova566c552021-05-16 22:58:08 +01001490 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001491}
1492
Pavel Begunkov888aae22021-01-19 13:32:39 +00001493static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1494{
1495 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1496}
1497
Pavel Begunkovd068b502021-05-16 22:58:11 +01001498static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001499{
Hristo Venev75b28af2019-08-26 17:23:46 +00001500 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001501 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001502
Stefan Bühler115e12e2019-04-24 23:54:18 +02001503 /*
1504 * writes to the cq entry need to come after reading head; the
1505 * control dependency is enough as we're using WRITE_ONCE to
1506 * fill the cq entry
1507 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001508 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001509 return NULL;
1510
Pavel Begunkov888aae22021-01-19 13:32:39 +00001511 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001512 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001513}
1514
Jens Axboef2842ab2020-01-08 11:04:00 -07001515static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1516{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001517 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001518 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001519 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1520 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001521 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001522}
1523
Jens Axboeb41e9852020-02-17 09:52:41 -07001524static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001525{
Jens Axboe5fd46172021-08-06 14:04:31 -06001526 /*
1527 * wake_up_all() may seem excessive, but io_wake_function() and
1528 * io_should_wake() handle the termination of the loop and only
1529 * wake as many waiters as we need to.
1530 */
1531 if (wq_has_sleeper(&ctx->cq_wait))
1532 wake_up_all(&ctx->cq_wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001533 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1534 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001535 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001536 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001537 if (waitqueue_active(&ctx->poll_wait)) {
1538 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001539 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1540 }
Jens Axboe8c838782019-03-12 15:48:16 -06001541}
1542
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001543static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1544{
1545 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe5fd46172021-08-06 14:04:31 -06001546 if (wq_has_sleeper(&ctx->cq_wait))
1547 wake_up_all(&ctx->cq_wait);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001548 }
1549 if (io_should_trigger_evfd(ctx))
1550 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001551 if (waitqueue_active(&ctx->poll_wait)) {
1552 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001553 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1554 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001555}
1556
Jens Axboec4a2ed72019-11-21 21:01:26 -07001557/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001558static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001559{
Jens Axboeb18032b2021-01-24 16:58:56 -07001560 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001561
Pavel Begunkova566c552021-05-16 22:58:08 +01001562 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001563 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001564
Jens Axboeb18032b2021-01-24 16:58:56 -07001565 posted = false;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001566 spin_lock(&ctx->completion_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001567 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001568 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001569 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001570
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001571 if (!cqe && !force)
1572 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001573 ocqe = list_first_entry(&ctx->cq_overflow_list,
1574 struct io_overflow_cqe, list);
1575 if (cqe)
1576 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1577 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001578 io_account_cq_overflow(ctx);
1579
Jens Axboeb18032b2021-01-24 16:58:56 -07001580 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001581 list_del(&ocqe->list);
1582 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001583 }
1584
Pavel Begunkov09e88402020-12-17 00:24:38 +00001585 all_flushed = list_empty(&ctx->cq_overflow_list);
1586 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001587 clear_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001588 WRITE_ONCE(ctx->rings->sq_flags,
1589 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001590 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001591
Jens Axboeb18032b2021-01-24 16:58:56 -07001592 if (posted)
1593 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001594 spin_unlock(&ctx->completion_lock);
Jens Axboeb18032b2021-01-24 16:58:56 -07001595 if (posted)
1596 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001597 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001598}
1599
Pavel Begunkov90f67362021-08-09 20:18:12 +01001600static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001601{
Jens Axboeca0a2652021-03-04 17:15:48 -07001602 bool ret = true;
1603
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001604 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001605 /* iopoll syncs against uring_lock, not completion_lock */
1606 if (ctx->flags & IORING_SETUP_IOPOLL)
1607 mutex_lock(&ctx->uring_lock);
Pavel Begunkov90f67362021-08-09 20:18:12 +01001608 ret = __io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001609 if (ctx->flags & IORING_SETUP_IOPOLL)
1610 mutex_unlock(&ctx->uring_lock);
1611 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001612
1613 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001614}
1615
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001616/* must be called shortly after putting a request */
1617static inline void io_put_task(struct task_struct *task, int nr)
1618{
1619 struct io_uring_task *tctx = task->io_uring;
1620
1621 percpu_counter_sub(&tctx->inflight, nr);
1622 if (unlikely(atomic_read(&tctx->in_idle)))
1623 wake_up(&tctx->wait);
1624 put_task_struct_many(task, nr);
1625}
1626
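/*
 * The CQ ring is full: stash the completion on ctx->cq_overflow_list and
 * flag the ring with IORING_SQ_CQ_OVERFLOW so it gets flushed back once
 * there is room. Returns false if the CQE had to be dropped entirely.
 */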
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001627static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1628 long res, unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001629{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001630 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001631
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001632 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1633 if (!ocqe) {
1634 /*
1635 * If we're in ring overflow flush mode, or in task cancel mode,
1636 * or cannot allocate an overflow entry, then we need to drop it
1637 * on the floor.
1638 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001639 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001640 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001641 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001642 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001643 set_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001644 WRITE_ONCE(ctx->rings->sq_flags,
1645 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1646
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001647 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001648 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001649 ocqe->cqe.res = res;
1650 ocqe->cqe.flags = cflags;
1651 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1652 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001653}
1654
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001655static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1656 long res, unsigned int cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001657{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001658 struct io_uring_cqe *cqe;
1659
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001660 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001661
1662 /*
1663 * If we can't get a cq entry, userspace overflowed the
1664 * submission (by quite a lot). Increment the overflow count in
1665 * the ring.
1666 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001667 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001668 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001669 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001670 WRITE_ONCE(cqe->res, res);
1671 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001672 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001673 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001674 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001675}
1676
Pavel Begunkov8d133262021-04-11 01:46:33 +01001677/* not as hot to bloat with inlining */
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001678static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1679 long res, unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001680{
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001681 return __io_cqring_fill_event(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001682}
1683
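/*
 * Post the completion CQE for @req under ->completion_lock. If that drops
 * the last reference, dismantle the request and park it on the ctx's
 * locked free list for later reuse via io_flush_cached_reqs().
 */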
Pavel Begunkov7a612352021-03-09 00:37:59 +00001684static void io_req_complete_post(struct io_kiocb *req, long res,
1685 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001686{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001687 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001688
Jens Axboe79ebeae2021-08-10 15:18:27 -06001689 spin_lock(&ctx->completion_lock);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001690 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001691 /*
1692 * If we're the last reference to this request, add to our locked
1693 * free_list cache.
1694 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001695 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001696 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001697 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
Pavel Begunkov7a612352021-03-09 00:37:59 +00001698 io_disarm_next(req);
1699 if (req->link) {
1700 io_req_task_queue(req->link);
1701 req->link = NULL;
1702 }
1703 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001704 io_dismantle_req(req);
1705 io_put_task(req->task, 1);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001706 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001707 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001708 } else {
1709 if (!percpu_ref_tryget(&ctx->refs))
1710 req = NULL;
1711 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001712 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001713 spin_unlock(&ctx->completion_lock);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001714
Pavel Begunkov180f8292021-03-14 20:57:09 +00001715 if (req) {
1716 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001717 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001718 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001719}
1720
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001721static inline bool io_req_needs_clean(struct io_kiocb *req)
1722{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001723 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001724}
1725
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001726static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001727 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001728{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001729 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001730 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001731 req->result = res;
1732 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001733 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001734}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001735
Pavel Begunkov889fca72021-02-10 00:03:09 +00001736static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1737 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001738{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001739 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1740 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001741 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001742 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001743}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001744
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001745static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001746{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001747 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001748}
1749
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001750static void io_req_complete_failed(struct io_kiocb *req, long res)
1751{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001752 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001753 io_req_complete_post(req, res, 0);
1754}
1755
Pavel Begunkov864ea922021-08-09 13:04:08 +01001756/*
1757 * Don't initialise the fields below on every allocation, but do that in
1758 * advance and keep them valid across allocations.
1759 */
1760static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1761{
1762 req->ctx = ctx;
1763 req->link = NULL;
1764 req->async_data = NULL;
1765 /* not necessary, but safer to zero */
1766 req->result = 0;
1767}
1768
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001769static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001770 struct io_submit_state *state)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001771{
Jens Axboe79ebeae2021-08-10 15:18:27 -06001772 spin_lock(&ctx->completion_lock);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001773 list_splice_init(&ctx->locked_free_list, &state->free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001774 ctx->locked_free_nr = 0;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001775 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001776}
1777
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001778/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001779static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001780{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001781 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001782 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001783
Jens Axboec7dae4b2021-02-09 19:53:37 -07001784 /*
1785 * If we have more than a batch's worth of requests in our IRQ side
1786 * locked cache, grab the lock and move them over to our submission
1787 * side cache.
1788 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001789 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001790 io_flush_cached_locked_reqs(ctx, state);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001791
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001792 nr = state->free_reqs;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001793 while (!list_empty(&state->free_list)) {
1794 struct io_kiocb *req = list_first_entry(&state->free_list,
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001795 struct io_kiocb, inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001796
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001797 list_del(&req->inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001798 state->reqs[nr++] = req;
1799 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001800 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001801 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001802
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001803 state->free_reqs = nr;
1804 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001805}
1806
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001807/*
1808 * A request might get retired back into the request caches even before opcode
1809 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1810 * Because of that, io_alloc_req() should be called only under ->uring_lock
1811 * and with extra caution, to avoid getting a request that is still being worked on.
1812 */
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001813static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001814 __must_hold(&ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001815{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001816 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkov864ea922021-08-09 13:04:08 +01001817 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1818 int ret, i;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001819
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001820 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001821
Pavel Begunkov864ea922021-08-09 13:04:08 +01001822 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1823 goto got_req;
Jens Axboe2579f912019-01-09 09:10:43 -07001824
Pavel Begunkov864ea922021-08-09 13:04:08 +01001825 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1826 state->reqs);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001827
Pavel Begunkov864ea922021-08-09 13:04:08 +01001828 /*
1829 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1830 * retry single alloc to be on the safe side.
1831 */
1832 if (unlikely(ret <= 0)) {
1833 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1834 if (!state->reqs[0])
1835 return NULL;
1836 ret = 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001837 }
Pavel Begunkov864ea922021-08-09 13:04:08 +01001838
1839 for (i = 0; i < ret; i++)
1840 io_preinit_req(state->reqs[i], ctx);
1841 state->free_reqs = ret;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001842got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001843 state->free_reqs--;
1844 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001845}
1846
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001847static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001848{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001849 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001850 fput(file);
1851}
1852
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001853static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001854{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001855 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001856
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01001857 if (io_req_needs_clean(req))
1858 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001859 if (!(flags & REQ_F_FIXED_FILE))
1860 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001861 if (req->fixed_rsrc_refs)
1862 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001863 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00001864 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001865 req->async_data = NULL;
1866 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001867}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001868
Pavel Begunkov216578e2020-10-13 09:44:00 +01001869static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001870{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001871 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001872
Pavel Begunkov216578e2020-10-13 09:44:00 +01001873 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001874 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001875
Jens Axboe79ebeae2021-08-10 15:18:27 -06001876 spin_lock(&ctx->completion_lock);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001877 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01001878 ctx->locked_free_nr++;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001879 spin_unlock(&ctx->completion_lock);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01001880
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001881 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001882}
1883
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001884static inline void io_remove_next_linked(struct io_kiocb *req)
1885{
1886 struct io_kiocb *nxt = req->link;
1887
1888 req->link = nxt->link;
1889 nxt->link = NULL;
1890}
1891
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001892static bool io_kill_linked_timeout(struct io_kiocb *req)
1893 __must_hold(&req->ctx->completion_lock)
Jens Axboe89b263f2021-08-10 15:14:18 -06001894 __must_hold(&req->ctx->timeout_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001895{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001896 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001897
Pavel Begunkov900fad42020-10-19 16:39:16 +01001898 /*
	1899	 * Can happen if a linked timeout fired and the link chain looked like:
	1900	 * req -> link t-out -> link t-out [-> ...]
1901 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001902 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1903 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001904
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001905 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001906 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001907 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001908 io_cqring_fill_event(link->ctx, link->user_data,
1909 -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001910 io_put_req_deferred(link);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001911 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001912 }
1913 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001914 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001915}
1916
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001917static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001918 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001919{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001920 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001921
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001922 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001923 while (link) {
1924 nxt = link->link;
1925 link->link = NULL;
1926
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001927 trace_io_uring_fail_link(req, link);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001928 io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001929 io_put_req_deferred(link);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001930 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001931 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001932}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001933
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001934static bool io_disarm_next(struct io_kiocb *req)
1935 __must_hold(&req->ctx->completion_lock)
1936{
1937 bool posted = false;
1938
Jens Axboe89b263f2021-08-10 15:14:18 -06001939 if (likely(req->flags & REQ_F_LINK_TIMEOUT)) {
1940 struct io_ring_ctx *ctx = req->ctx;
1941
1942 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001943 posted = io_kill_linked_timeout(req);
Jens Axboe89b263f2021-08-10 15:14:18 -06001944 spin_unlock_irq(&ctx->timeout_lock);
1945 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001946 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01001947 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001948 posted |= (req->link != NULL);
1949 io_fail_links(req);
1950 }
1951 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001952}
1953
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001954static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001955{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001956 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001957
Jens Axboe9e645e112019-05-10 16:07:28 -06001958 /*
1959 * If LINK is set, we have dependent requests in this chain. If we
1960 * didn't fail this request, queue the first one up, moving any other
1961 * dependencies to the next request. In case of failure, fail the rest
1962 * of the chain.
1963 */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001964 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001965 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001966 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001967
Jens Axboe79ebeae2021-08-10 15:18:27 -06001968 spin_lock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001969 posted = io_disarm_next(req);
1970 if (posted)
1971 io_commit_cqring(req->ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001972 spin_unlock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001973 if (posted)
1974 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001975 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001976 nxt = req->link;
1977 req->link = NULL;
1978 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001979}
Jens Axboe2665abf2019-11-05 12:40:47 -07001980
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001981static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001982{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001983 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001984 return NULL;
1985 return __io_req_find_next(req);
1986}
1987
Pavel Begunkov2c323952021-02-28 22:04:53 +00001988static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1989{
1990 if (!ctx)
1991 return;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001992 if (ctx->submit_state.compl_nr) {
Pavel Begunkov2c323952021-02-28 22:04:53 +00001993 mutex_lock(&ctx->uring_lock);
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01001994 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001995 mutex_unlock(&ctx->uring_lock);
1996 }
1997 percpu_ref_put(&ctx->refs);
1998}
1999
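/*
 * task_work handler: run every pending io_task_work callback queued for
 * this task. ctx refs and completion flushing are batched across runs of
 * requests that belong to the same ring, see ctx_flush_and_put().
 */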
Jens Axboe7cbf1722021-02-10 00:03:20 +00002000static void tctx_task_work(struct callback_head *cb)
2001{
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002002 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002003 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2004 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002005
Pavel Begunkov16f72072021-06-17 18:14:09 +01002006 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002007 struct io_wq_work_node *node;
2008
2009 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01002010 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002011 INIT_WQ_LIST(&tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002012 if (!node)
2013 tctx->task_running = false;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002014 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002015 if (!node)
2016 break;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002017
Pavel Begunkov6294f362021-08-10 17:53:55 +01002018 do {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002019 struct io_wq_work_node *next = node->next;
2020 struct io_kiocb *req = container_of(node, struct io_kiocb,
2021 io_task_work.node);
2022
2023 if (req->ctx != ctx) {
2024 ctx_flush_and_put(ctx);
2025 ctx = req->ctx;
2026 percpu_ref_get(&ctx->refs);
2027 }
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002028 req->io_task_work.func(req);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002029 node = next;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002030 } while (node);
2031
Jens Axboe7cbf1722021-02-10 00:03:20 +00002032 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01002033 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002034
2035 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002036}
2037
Pavel Begunkove09ee512021-07-01 13:26:05 +01002038static void io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00002039{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002040 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002041 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002042 enum task_work_notify_mode notify;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002043 struct io_wq_work_node *node;
Jens Axboe0b81e802021-02-16 10:33:53 -07002044 unsigned long flags;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002045 bool running;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002046
2047 WARN_ON_ONCE(!tctx);
2048
Jens Axboe0b81e802021-02-16 10:33:53 -07002049 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002050 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002051 running = tctx->task_running;
2052 if (!running)
2053 tctx->task_running = true;
Jens Axboe0b81e802021-02-16 10:33:53 -07002054 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002055
2056 /* task_work already pending, we're done */
Pavel Begunkov6294f362021-08-10 17:53:55 +01002057 if (running)
Pavel Begunkove09ee512021-07-01 13:26:05 +01002058 return;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002059
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002060 /*
2061 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2062 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2063 * processing task_work. There's no reliable way to tell if TWA_RESUME
2064 * will do the job.
2065 */
2066 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002067 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2068 wake_up_process(tsk);
Pavel Begunkove09ee512021-07-01 13:26:05 +01002069 return;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002070 }
Pavel Begunkov2215bed2021-08-09 13:04:06 +01002071
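	/*
	 * task_work_add() failed, so the task is exiting: pull the queued
	 * entries back off the list and run them from the ring's fallback
	 * workqueue instead.
	 */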
Pavel Begunkove09ee512021-07-01 13:26:05 +01002072 spin_lock_irqsave(&tctx->task_lock, flags);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002073 tctx->task_running = false;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002074 node = tctx->task_list.first;
2075 INIT_WQ_LIST(&tctx->task_list);
2076 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002077
Pavel Begunkove09ee512021-07-01 13:26:05 +01002078 while (node) {
2079 req = container_of(node, struct io_kiocb, io_task_work.node);
2080 node = node->next;
2081 if (llist_add(&req->io_task_work.fallback_node,
2082 &req->ctx->fallback_llist))
2083 schedule_delayed_work(&req->ctx->fallback_work, 1);
2084 }
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002085}
2086
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002087static void io_req_task_cancel(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06002088{
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002089 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002090
Pavel Begunkove83acd72021-02-28 22:35:09 +00002091 /* ctx is guaranteed to stay alive while we hold uring_lock */
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002092 mutex_lock(&ctx->uring_lock);
Pavel Begunkov25935532021-03-19 17:22:40 +00002093 io_req_complete_failed(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002094 mutex_unlock(&ctx->uring_lock);
Jens Axboec40f6372020-06-25 15:39:59 -06002095}
2096
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002097static void io_req_task_submit(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06002098{
2099 struct io_ring_ctx *ctx = req->ctx;
2100
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00002101	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002102 mutex_lock(&ctx->uring_lock);
Pavel Begunkovaf066f32021-08-09 13:04:19 +01002103 if (likely(!(req->task->flags & PF_EXITING)))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002104 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002105 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002106 io_req_complete_failed(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002107 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002108}
2109
Pavel Begunkova3df76982021-02-18 22:32:52 +00002110static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2111{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002112 req->result = ret;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002113 req->io_task_work.func = io_req_task_cancel;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002114 io_req_task_work_add(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00002115}
2116
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002117static void io_req_task_queue(struct io_kiocb *req)
2118{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002119 req->io_task_work.func = io_req_task_submit;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002120 io_req_task_work_add(req);
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002121}
2122
Jens Axboe773af692021-07-27 10:25:55 -06002123static void io_req_task_queue_reissue(struct io_kiocb *req)
2124{
2125 req->io_task_work.func = io_queue_async_work;
2126 io_req_task_work_add(req);
2127}
2128
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002129static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002130{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002131 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002132
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002133 if (nxt)
2134 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002135}
2136
Jens Axboe9e645e112019-05-10 16:07:28 -06002137static void io_free_req(struct io_kiocb *req)
2138{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002139 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002140 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002141}
2142
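/*
 * Batches the task and ctx reference drops for a run of requests freed
 * together, avoiding one atomic operation per request.
 */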
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002143struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002144 struct task_struct *task;
2145 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002146 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002147};
2148
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002149static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002150{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002151 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002152 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002153 rb->task = NULL;
2154}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002155
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002156static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2157 struct req_batch *rb)
2158{
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002159 if (rb->ctx_refs)
2160 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkove9dbe222021-08-09 13:04:20 +01002161 if (rb->task == current)
2162 current->io_uring->cached_refs += rb->task_refs;
2163 else if (rb->task)
2164 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002165}
2166
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002167static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2168 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002169{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002170 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002171 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002172
Jens Axboee3bc8e92020-09-24 08:45:57 -06002173 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002174 if (rb->task)
2175 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002176 rb->task = req->task;
2177 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002178 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002179 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002180 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002181
Pavel Begunkovbd759042021-02-12 03:23:50 +00002182 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002183 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002184 else
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002185 list_add(&req->inflight_entry, &state->free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002186}
2187
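/*
 * Flush requests that completed inline during submission: post all their
 * CQEs under a single completion_lock hold, then drop the submission
 * references and recycle the request structs in one batch.
 */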
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002188static void io_submit_flush_completions(struct io_ring_ctx *ctx)
Jens Axboea141dd82021-08-12 12:48:34 -06002189 __must_hold(&ctx->uring_lock)
Pavel Begunkov905c1722021-02-10 00:03:14 +00002190{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002191 struct io_submit_state *state = &ctx->submit_state;
2192 int i, nr = state->compl_nr;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002193 struct req_batch rb;
2194
Jens Axboe79ebeae2021-08-10 15:18:27 -06002195 spin_lock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002196 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002197 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002198
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002199 __io_cqring_fill_event(ctx, req->user_data, req->result,
2200 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002201 }
2202 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002203 spin_unlock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002204 io_cqring_ev_posted(ctx);
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002205
2206 io_init_req_batch(&rb);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002207 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002208 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov905c1722021-02-10 00:03:14 +00002209
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002210 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002211 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002212 }
2213
2214 io_req_free_batch_finish(ctx, &rb);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002215 state->compl_nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002216}
2217
Jens Axboeba816ad2019-09-28 11:36:45 -06002218/*
2219 * Drop a reference to the request, and return the next request in the chain
2220 * (if there is one) if this was the last reference.
2221 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002222static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002223{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002224 struct io_kiocb *nxt = NULL;
2225
Jens Axboede9b4cc2021-02-24 13:28:27 -07002226 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002227 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002228 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002229 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002230 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002231}
2232
Pavel Begunkov0d850352021-03-19 17:22:37 +00002233static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002234{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002235 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002236 io_free_req(req);
2237}
2238
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002239static inline void io_put_req_deferred(struct io_kiocb *req)
Pavel Begunkov216578e2020-10-13 09:44:00 +01002240{
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002241 if (req_ref_put_and_test(req)) {
Pavel Begunkov543af3a2021-08-09 13:04:15 +01002242 req->io_task_work.func = io_free_req;
2243 io_req_task_work_add(req);
2244 }
Pavel Begunkov216578e2020-10-13 09:44:00 +01002245}
2246
Pavel Begunkov6c503152021-01-04 20:36:36 +00002247static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002248{
2249 /* See comment at the top of this file */
2250 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002251 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002252}
2253
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002254static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2255{
2256 struct io_rings *rings = ctx->rings;
2257
2258 /* make sure SQ entry isn't read before tail */
2259 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2260}
2261
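/*
 * Build the CQE flags describing a selected buffer: the buffer id is packed
 * into the upper bits via IORING_CQE_BUFFER_SHIFT and IORING_CQE_F_BUFFER
 * marks cqe->flags as carrying a buffer id, e.g. the application can recover
 * it with cqe->flags >> IORING_CQE_BUFFER_SHIFT. The kbuf is freed here, so
 * the request no longer owns a selected buffer afterwards.
 */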
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002262static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002263{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002264 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002265
Jens Axboebcda7ba2020-02-23 16:42:51 -07002266 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2267 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002268 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002269 kfree(kbuf);
2270 return cflags;
2271}
2272
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002273static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2274{
2275 struct io_buffer *kbuf;
2276
2277 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2278 return io_put_kbuf(req, kbuf);
2279}
2280
Jens Axboe4c6e2772020-07-01 11:29:10 -06002281static inline bool io_run_task_work(void)
2282{
Nadav Amitef98eb02021-08-07 17:13:41 -07002283 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
Jens Axboe4c6e2772020-07-01 11:29:10 -06002284 __set_current_state(TASK_RUNNING);
Nadav Amitef98eb02021-08-07 17:13:41 -07002285 tracehook_notify_signal();
Jens Axboe4c6e2772020-07-01 11:29:10 -06002286 return true;
2287 }
2288
2289 return false;
2290}
2291
Jens Axboedef596e2019-01-09 08:59:42 -07002292/*
2293 * Find and free completed poll iocbs
2294 */
2295static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002296 struct list_head *done)
Jens Axboedef596e2019-01-09 08:59:42 -07002297{
Jens Axboe8237e042019-12-28 10:48:22 -07002298 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002299 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002300
2301 /* order with ->result store in io_complete_rw_iopoll() */
2302 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002303
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002304 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002305 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002306 int cflags = 0;
2307
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002308 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002309 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002310
Pavel Begunkova8576af2021-08-15 10:40:21 +01002311 if (READ_ONCE(req->result) == -EAGAIN &&
Pavel Begunkov8c130822021-03-22 01:58:32 +00002312 !(req->flags & REQ_F_DONT_REISSUE)) {
Pavel Begunkovf1613402021-02-11 18:28:21 +00002313 req->iopoll_completed = 0;
Jens Axboe773af692021-07-27 10:25:55 -06002314 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00002315 continue;
Pavel Begunkovf1613402021-02-11 18:28:21 +00002316 }
2317
Jens Axboebcda7ba2020-02-23 16:42:51 -07002318 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002319 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002320
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002321 __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002322 (*nr_events)++;
2323
Jens Axboede9b4cc2021-02-24 13:28:27 -07002324 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002325 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002326 }
Jens Axboedef596e2019-01-09 08:59:42 -07002327
Jens Axboe09bb8392019-03-13 12:39:28 -06002328 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002329 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002330 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002331}
2332
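/*
 * Walk ->iopoll_list and reap completions: requests that already completed
 * are moved straight to a local done list, otherwise ->iopoll() is called on
 * the kiocb (optionally spinning while we are below the requested minimum
 * and only a single queue is involved). Anything on the done list is then
 * completed and freed in a batch via io_iopoll_complete().
 */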
Jens Axboedef596e2019-01-09 08:59:42 -07002333static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002334 long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002335{
2336 struct io_kiocb *req, *tmp;
2337 LIST_HEAD(done);
2338 bool spin;
Jens Axboedef596e2019-01-09 08:59:42 -07002339
2340 /*
2341 * Only spin for completions if we don't have multiple devices hanging
2342 * off our complete list, and we're under the requested amount.
2343 */
Hao Xu915b3dd2021-06-28 05:37:30 +08002344 spin = !ctx->poll_multi_queue && *nr_events < min;
Jens Axboedef596e2019-01-09 08:59:42 -07002345
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002346 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002347 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkova2416e12021-08-09 13:04:09 +01002348 int ret;
Jens Axboedef596e2019-01-09 08:59:42 -07002349
2350 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002351 * Move completed and retryable entries to our local lists.
2352 * If we find a request that requires polling, break out
2353 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002354 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002355 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002356 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002357 continue;
2358 }
2359 if (!list_empty(&done))
2360 break;
2361
2362 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
Pavel Begunkova2416e12021-08-09 13:04:09 +01002363 if (unlikely(ret < 0))
2364 return ret;
2365 else if (ret)
2366 spin = false;
Jens Axboedef596e2019-01-09 08:59:42 -07002367
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002368 /* iopoll may have completed current req */
2369 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002370 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002371 }
2372
2373 if (!list_empty(&done))
Pavel Begunkova8576af2021-08-15 10:40:21 +01002374 io_iopoll_complete(ctx, nr_events, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002375
Pavel Begunkova2416e12021-08-09 13:04:09 +01002376 return 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002377}
2378
2379/*
Jens Axboedef596e2019-01-09 08:59:42 -07002380 * We can't just wait for polled events to come to us, we have to actively
2381 * find and complete them.
2382 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002383static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002384{
2385 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2386 return;
2387
2388 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002389 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002390 unsigned int nr_events = 0;
2391
Pavel Begunkova8576af2021-08-15 10:40:21 +01002392 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002393
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002394		/* let it sleep and repeat later if we can't complete a request */
2395 if (nr_events == 0)
2396 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002397 /*
2398		 * Ensure we allow local-to-the-cpu processing to take place;
2399		 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002400		 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002401 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002402 if (need_resched()) {
2403 mutex_unlock(&ctx->uring_lock);
2404 cond_resched();
2405 mutex_lock(&ctx->uring_lock);
2406 }
Jens Axboedef596e2019-01-09 08:59:42 -07002407 }
2408 mutex_unlock(&ctx->uring_lock);
2409}
2410
Pavel Begunkov7668b922020-07-07 16:36:21 +03002411static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002412{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002413 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002414 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002415
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002416 /*
2417 * We disallow the app entering submit/complete with polling, but we
2418 * still need to lock the ring to prevent racing with polled issue
2419 * that got punted to a workqueue.
2420 */
2421 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002422 /*
2423 * Don't enter poll loop if we already have events pending.
2424 * If we do, we can potentially be spinning for commands that
2425 * already triggered a CQE (eg in error).
2426 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01002427 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002428 __io_cqring_overflow_flush(ctx, false);
2429 if (io_cqring_events(ctx))
2430 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002431 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002432 /*
2433 * If a submit got punted to a workqueue, we can have the
2434 * application entering polling for a command before it gets
2435 * issued. That app will hold the uring_lock for the duration
2436 * of the poll right here, so we need to take a breather every
2437 * now and then to ensure that the issue has a chance to add
2438 * the poll to the issued list. Otherwise we can spin here
2439 * forever, while the workqueue is stuck trying to acquire the
2440 * very same mutex.
2441 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002442 if (list_empty(&ctx->iopoll_list)) {
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002443 u32 tail = ctx->cached_cq_tail;
2444
Jens Axboe500f9fb2019-08-19 12:15:59 -06002445 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002446 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002447 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002448
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002449 /* some requests don't go through iopoll_list */
2450 if (tail != ctx->cached_cq_tail ||
2451 list_empty(&ctx->iopoll_list))
Pavel Begunkove9979b32021-04-13 02:58:45 +01002452 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002453 }
Pavel Begunkova8576af2021-08-15 10:40:21 +01002454 ret = io_do_iopoll(ctx, &nr_events, min);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002455 } while (!ret && nr_events < min && !need_resched());
2456out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002457 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002458 return ret;
2459}
2460
Jens Axboe491381ce2019-10-17 09:20:46 -06002461static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002462{
Jens Axboe491381ce2019-10-17 09:20:46 -06002463 /*
2464	 * Tell lockdep we inherited freeze protection from the submission
2465 * thread.
2466 */
2467 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002468 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002469
Pavel Begunkov1c986792021-03-22 01:58:31 +00002470 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2471 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002472 }
2473}
2474
Jens Axboeb63534c2020-06-04 11:28:00 -06002475#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002476static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002477{
Pavel Begunkovab454432021-03-22 01:58:33 +00002478 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002479
Pavel Begunkovab454432021-03-22 01:58:33 +00002480 if (!rw)
2481 return !io_req_prep_async(req);
2482 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2483 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2484 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002485}
Jens Axboeb63534c2020-06-04 11:28:00 -06002486
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002487static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002488{
Jens Axboe355afae2020-09-02 09:30:31 -06002489 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002490 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002491
Jens Axboe355afae2020-09-02 09:30:31 -06002492 if (!S_ISBLK(mode) && !S_ISREG(mode))
2493 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002494 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2495 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002496 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002497 /*
2498 * If ref is dying, we might be running poll reap from the exit work.
2499 * Don't attempt to reissue from that path, just let it fail with
2500 * -EAGAIN.
2501 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002502 if (percpu_ref_is_dying(&ctx->refs))
2503 return false;
Jens Axboeef046882021-07-27 10:50:31 -06002504 /*
2505	 * Play it safe and assume it's not safe to re-import and reissue if we're
2506 * not in the original thread group (or in task context).
2507 */
2508 if (!same_thread_group(req->task, current) || !in_task())
2509 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002510 return true;
2511}
Jens Axboee82ad482021-04-02 19:45:34 -06002512#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002513static bool io_resubmit_prep(struct io_kiocb *req)
2514{
2515 return false;
2516}
Jens Axboee82ad482021-04-02 19:45:34 -06002517static bool io_rw_should_reissue(struct io_kiocb *req)
2518{
2519 return false;
2520}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002521#endif
2522
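/*
 * Common completion path for reads and writes: drop write freeze protection
 * if needed, and when the result differs from what was expected either mark
 * the request for transparent reissue (-EAGAIN/-EOPNOTSUPP when reissue is
 * safe, returning true so the caller stops processing) or record the failure
 * and the short result.
 */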
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002523static bool __io_complete_rw_common(struct io_kiocb *req, long res)
Jens Axboea1d7c392020-06-22 11:09:46 -06002524{
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002525 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2526 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002527 if (res != req->result) {
2528 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2529 io_rw_should_reissue(req)) {
2530 req->flags |= REQ_F_REISSUE;
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002531 return true;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002532 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002533 req_set_fail(req);
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002534 req->result = res;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002535 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002536 return false;
2537}
2538
2539static void io_req_task_complete(struct io_kiocb *req)
2540{
2541 int cflags = 0;
2542
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002543 if (req->flags & REQ_F_BUFFER_SELECTED)
2544 cflags = io_put_rw_kbuf(req);
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002545 __io_req_complete(req, 0, req->result, cflags);
2546}
2547
2548static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2549 unsigned int issue_flags)
2550{
2551 if (__io_complete_rw_common(req, res))
2552 return;
2553 io_req_task_complete(req);
Jens Axboeba816ad2019-09-28 11:36:45 -06002554}
2555
2556static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2557{
Jens Axboe9adbd452019-12-20 08:45:55 -07002558 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002559
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002560 if (__io_complete_rw_common(req, res))
2561 return;
2562 req->result = res;
2563 req->io_task_work.func = io_req_task_complete;
2564 io_req_task_work_add(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002565}
2566
Jens Axboedef596e2019-01-09 08:59:42 -07002567static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2568{
Jens Axboe9adbd452019-12-20 08:45:55 -07002569 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002570
Jens Axboe491381ce2019-10-17 09:20:46 -06002571 if (kiocb->ki_flags & IOCB_WRITE)
2572 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002573 if (unlikely(res != req->result)) {
Jens Axboea1ff1e32021-04-12 06:40:02 -06002574 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2575 io_resubmit_prep(req))) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002576 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002577 req->flags |= REQ_F_DONT_REISSUE;
2578 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002579 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002580
2581 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002582 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002583 smp_wmb();
2584 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002585}
2586
2587/*
2588 * After the iocb has been issued, it's safe to be found on the poll list.
2589 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002590 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002591 * accessing the kiocb cookie.
2592 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002593static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002594{
2595 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002596 const bool in_async = io_wq_current_is_worker();
2597
2598 /* workqueue context doesn't hold uring_lock, grab it now */
2599 if (unlikely(in_async))
2600 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002601
2602 /*
2603	 * Track whether we have multiple files in our lists. This will impact
2604	 * how we do polling: we avoid spinning if the requests are potentially
2605	 * on different devices.
2606 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002607 if (list_empty(&ctx->iopoll_list)) {
Hao Xu915b3dd2021-06-28 05:37:30 +08002608 ctx->poll_multi_queue = false;
2609 } else if (!ctx->poll_multi_queue) {
Jens Axboedef596e2019-01-09 08:59:42 -07002610 struct io_kiocb *list_req;
Hao Xu915b3dd2021-06-28 05:37:30 +08002611 unsigned int queue_num0, queue_num1;
Jens Axboedef596e2019-01-09 08:59:42 -07002612
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002613 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002614 inflight_entry);
Hao Xu915b3dd2021-06-28 05:37:30 +08002615
2616 if (list_req->file != req->file) {
2617 ctx->poll_multi_queue = true;
2618 } else {
2619 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2620 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2621 if (queue_num0 != queue_num1)
2622 ctx->poll_multi_queue = true;
2623 }
Jens Axboedef596e2019-01-09 08:59:42 -07002624 }
2625
2626 /*
2627 * For fast devices, IO may have already completed. If it has, add
2628 * it to the front so we find it first.
2629 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002630 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002631 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002632 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002633 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002634
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002635 if (unlikely(in_async)) {
2636 /*
2637		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2638 * in sq thread task context or in io worker task context. If
2639 * current task context is sq thread, we don't need to check
2640 * whether should wake up sq thread.
2641 */
2642 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2643 wq_has_sleeper(&ctx->sq_data->wait))
2644 wake_up(&ctx->sq_data->wait);
2645
2646 mutex_unlock(&ctx->uring_lock);
2647 }
Jens Axboedef596e2019-01-09 08:59:42 -07002648}
2649
Jens Axboe4503b762020-06-01 10:00:27 -06002650static bool io_bdev_nowait(struct block_device *bdev)
2651{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002652 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002653}
2654
Jens Axboe2b188cc2019-01-07 10:46:33 -07002655/*
2656 * If we tracked the file through the SCM inflight mechanism, we could support
2657 * any file. For now, just ensure that anything potentially problematic is done
2658 * inline.
2659 */
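/*
 * In short: block devices qualify if the underlying queue supports nowait
 * issue, sockets always qualify, and regular files qualify only when backed
 * by a nowait-capable block device and not by io_uring itself. Everything
 * else must either be opened O_NONBLOCK, or advertise FMODE_NOWAIT and
 * provide the matching ->read_iter()/->write_iter().
 */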
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002660static bool __io_file_supports_nowait(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002661{
2662 umode_t mode = file_inode(file)->i_mode;
2663
Jens Axboe4503b762020-06-01 10:00:27 -06002664 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002665 if (IS_ENABLED(CONFIG_BLOCK) &&
2666 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002667 return true;
2668 return false;
2669 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002670 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002671 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002672 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002673 if (IS_ENABLED(CONFIG_BLOCK) &&
2674 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002675 file->f_op != &io_uring_fops)
2676 return true;
2677 return false;
2678 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002679
Jens Axboec5b85622020-06-09 19:23:05 -06002680 /* any ->read/write should understand O_NONBLOCK */
2681 if (file->f_flags & O_NONBLOCK)
2682 return true;
2683
Jens Axboeaf197f52020-04-28 13:15:06 -06002684 if (!(file->f_mode & FMODE_NOWAIT))
2685 return false;
2686
2687 if (rw == READ)
2688 return file->f_op->read_iter != NULL;
2689
2690 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002691}
2692
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002693static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
Jens Axboe7b29f922021-03-12 08:30:14 -07002694{
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002695 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
Jens Axboe7b29f922021-03-12 08:30:14 -07002696 return true;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002697 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
Jens Axboe7b29f922021-03-12 08:30:14 -07002698 return true;
2699
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002700 return __io_file_supports_nowait(req->file, rw);
Jens Axboe7b29f922021-03-12 08:30:14 -07002701}
2702
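/*
 * Initialise the kiocb for a read/write from the SQE: file position and
 * flags, RWF_* translation, ioprio, and the completion callback. Under
 * IORING_SETUP_IOPOLL the file must support ->iopoll() with IOCB_DIRECT
 * set; otherwise IOCB_HIPRI is rejected. The user address, length and
 * buffer index from the SQE are stashed on the request for the actual
 * import later.
 */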
Pavel Begunkova88fc402020-09-30 22:57:53 +03002703static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002704{
Jens Axboedef596e2019-01-09 08:59:42 -07002705 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002706 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002707 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002708 unsigned ioprio;
2709 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002710
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01002711 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002712 req->flags |= REQ_F_ISREG;
2713
Jens Axboe2b188cc2019-01-07 10:46:33 -07002714 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002715 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002716 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002717 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002718 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002719 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002720 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2721 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2722 if (unlikely(ret))
2723 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002724
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002725 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2726 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2727 req->flags |= REQ_F_NOWAIT;
2728
Jens Axboe2b188cc2019-01-07 10:46:33 -07002729 ioprio = READ_ONCE(sqe->ioprio);
2730 if (ioprio) {
2731 ret = ioprio_check_cap(ioprio);
2732 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002733 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002734
2735 kiocb->ki_ioprio = ioprio;
2736 } else
2737 kiocb->ki_ioprio = get_current_ioprio();
2738
Jens Axboedef596e2019-01-09 08:59:42 -07002739 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002740 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2741 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002742 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002743
Jens Axboedef596e2019-01-09 08:59:42 -07002744 kiocb->ki_flags |= IOCB_HIPRI;
2745 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002746 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002747 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002748 if (kiocb->ki_flags & IOCB_HIPRI)
2749 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002750 kiocb->ki_complete = io_complete_rw;
2751 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002752
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002753 if (req->opcode == IORING_OP_READ_FIXED ||
2754 req->opcode == IORING_OP_WRITE_FIXED) {
2755 req->imu = NULL;
2756 io_req_set_rsrc_node(req);
2757 }
2758
Jens Axboe3529d8c2019-12-19 18:24:38 -07002759 req->rw.addr = READ_ONCE(sqe->addr);
2760 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002761 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002762 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002763}
2764
2765static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2766{
2767 switch (ret) {
2768 case -EIOCBQUEUED:
2769 break;
2770 case -ERESTARTSYS:
2771 case -ERESTARTNOINTR:
2772 case -ERESTARTNOHAND:
2773 case -ERESTART_RESTARTBLOCK:
2774 /*
2775 * We can't just restart the syscall, since previously
2776 * submitted sqes may already be in progress. Just fail this
2777 * IO with EINTR.
2778 */
2779 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002780 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002781 default:
2782 kiocb->ki_complete(kiocb, ret, 0);
2783 }
2784}
2785
Jens Axboea1d7c392020-06-22 11:09:46 -06002786static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002787 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002788{
Jens Axboeba042912019-12-25 16:33:42 -07002789 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002790 struct io_async_rw *io = req->async_data;
Pavel Begunkov97284632021-04-08 19:28:03 +01002791 bool check_reissue = kiocb->ki_complete == io_complete_rw;
Jens Axboeba042912019-12-25 16:33:42 -07002792
Jens Axboe227c0c92020-08-13 11:51:40 -06002793 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002794 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002795 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002796 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002797 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002798 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002799 }
2800
Jens Axboeba042912019-12-25 16:33:42 -07002801 if (req->flags & REQ_F_CUR_POS)
2802 req->file->f_pos = kiocb->ki_pos;
Hao Xue149bd742021-06-28 05:48:05 +08002803 if (ret >= 0 && check_reissue)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002804 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002805 else
2806 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002807
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01002808 if (check_reissue && (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov97284632021-04-08 19:28:03 +01002809 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06002810 if (io_resubmit_prep(req)) {
Jens Axboe773af692021-07-27 10:25:55 -06002811 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00002812 } else {
Pavel Begunkov97284632021-04-08 19:28:03 +01002813 int cflags = 0;
2814
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002815 req_set_fail(req);
Pavel Begunkov97284632021-04-08 19:28:03 +01002816 if (req->flags & REQ_F_BUFFER_SELECTED)
2817 cflags = io_put_rw_kbuf(req);
2818 __io_req_complete(req, issue_flags, ret, cflags);
2819 }
2820 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002821}
2822
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002823static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2824 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07002825{
Jens Axboe9adbd452019-12-20 08:45:55 -07002826 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01002827 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002828 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07002829
Pavel Begunkov75769e32021-04-01 15:43:54 +01002830 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07002831 return -EFAULT;
2832 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01002833 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07002834 return -EFAULT;
2835
2836 /*
2837	 * May not be the start of the buffer; set the size appropriately
2838 * and advance us to the beginning.
2839 */
2840 offset = buf_addr - imu->ubuf;
2841 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002842
2843 if (offset) {
2844 /*
2845 * Don't use iov_iter_advance() here, as it's really slow for
2846 * using the latter parts of a big fixed buffer - it iterates
2847 * over each segment manually. We can cheat a bit here, because
2848 * we know that:
2849 *
2850 * 1) it's a BVEC iter, we set it up
2851 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2852 * first and last bvec
2853 *
2854 * So just find our index, and adjust the iterator afterwards.
2855 * If the offset is within the first bvec (or the whole first
2856		 * bvec), just use iov_iter_advance(). This makes it easier
2857 * since we can just skip the first segment, which may not
2858 * be PAGE_SIZE aligned.
2859 */
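		/*
		 * For example, with 4K pages, a 512-byte first bvec and
		 * offset == 9000: offset becomes 8488 after skipping the
		 * first segment, seg_skip = 1 + (8488 >> 12) = 3, and
		 * iov_offset = 8488 & 4095 = 296, so iteration resumes 296
		 * bytes into the fourth bvec.
		 */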
2860 const struct bio_vec *bvec = imu->bvec;
2861
2862 if (offset <= bvec->bv_len) {
2863 iov_iter_advance(iter, offset);
2864 } else {
2865 unsigned long seg_skip;
2866
2867 /* skip first vec */
2868 offset -= bvec->bv_len;
2869 seg_skip = 1 + (offset >> PAGE_SHIFT);
2870
2871 iter->bvec = bvec + seg_skip;
2872 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002873 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002874 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002875 }
2876 }
2877
Pavel Begunkov847595d2021-02-04 13:52:06 +00002878 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002879}
2880
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002881static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2882{
2883 struct io_ring_ctx *ctx = req->ctx;
2884 struct io_mapped_ubuf *imu = req->imu;
2885 u16 index, buf_index = req->buf_index;
2886
2887 if (likely(!imu)) {
2888 if (unlikely(buf_index >= ctx->nr_user_bufs))
2889 return -EFAULT;
2890 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2891 imu = READ_ONCE(ctx->user_bufs[index]);
2892 req->imu = imu;
2893 }
2894 return __io_import_fixed(req, rw, iter, imu);
2895}
2896
Jens Axboebcda7ba2020-02-23 16:42:51 -07002897static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2898{
2899 if (needs_lock)
2900 mutex_unlock(&ctx->uring_lock);
2901}
2902
2903static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2904{
2905 /*
2906 * "Normal" inline submissions always hold the uring_lock, since we
2907 * grab it from the system call. Same is true for the SQPOLL offload.
2908 * The only exception is when we've detached the request and issue it
2909 * from an async worker thread, grab the lock for that case.
2910 */
2911 if (needs_lock)
2912 mutex_lock(&ctx->uring_lock);
2913}
2914
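/*
 * Select a buffer from provided-buffer group @bgid: take the entry at the
 * tail of the group's list, or the list head itself if it is the only one
 * left (in which case the group is erased from the io_buffers xarray), and
 * clamp *len to the chosen buffer's length. Returns ERR_PTR(-ENOBUFS) if
 * the group does not exist.
 */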
2915static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2916 int bgid, struct io_buffer *kbuf,
2917 bool needs_lock)
2918{
2919 struct io_buffer *head;
2920
2921 if (req->flags & REQ_F_BUFFER_SELECTED)
2922 return kbuf;
2923
2924 io_ring_submit_lock(req->ctx, needs_lock);
2925
2926 lockdep_assert_held(&req->ctx->uring_lock);
2927
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002928 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002929 if (head) {
2930 if (!list_empty(&head->list)) {
2931 kbuf = list_last_entry(&head->list, struct io_buffer,
2932 list);
2933 list_del(&kbuf->list);
2934 } else {
2935 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002936 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002937 }
2938 if (*len > kbuf->len)
2939 *len = kbuf->len;
2940 } else {
2941 kbuf = ERR_PTR(-ENOBUFS);
2942 }
2943
2944 io_ring_submit_unlock(req->ctx, needs_lock);
2945
2946 return kbuf;
2947}
2948
Jens Axboe4d954c22020-02-27 07:31:19 -07002949static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2950 bool needs_lock)
2951{
2952 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002953 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002954
2955 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002956 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002957 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2958 if (IS_ERR(kbuf))
2959 return kbuf;
2960 req->rw.addr = (u64) (unsigned long) kbuf;
2961 req->flags |= REQ_F_BUFFER_SELECTED;
2962 return u64_to_user_ptr(kbuf->addr);
2963}
2964
2965#ifdef CONFIG_COMPAT
2966static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2967 bool needs_lock)
2968{
2969 struct compat_iovec __user *uiov;
2970 compat_ssize_t clen;
2971 void __user *buf;
2972 ssize_t len;
2973
2974 uiov = u64_to_user_ptr(req->rw.addr);
2975 if (!access_ok(uiov, sizeof(*uiov)))
2976 return -EFAULT;
2977 if (__get_user(clen, &uiov->iov_len))
2978 return -EFAULT;
2979 if (clen < 0)
2980 return -EINVAL;
2981
2982 len = clen;
2983 buf = io_rw_buffer_select(req, &len, needs_lock);
2984 if (IS_ERR(buf))
2985 return PTR_ERR(buf);
2986 iov[0].iov_base = buf;
2987 iov[0].iov_len = (compat_size_t) len;
2988 return 0;
2989}
2990#endif
2991
2992static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2993 bool needs_lock)
2994{
2995 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2996 void __user *buf;
2997 ssize_t len;
2998
2999 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3000 return -EFAULT;
3001
3002 len = iov[0].iov_len;
3003 if (len < 0)
3004 return -EINVAL;
3005 buf = io_rw_buffer_select(req, &len, needs_lock);
3006 if (IS_ERR(buf))
3007 return PTR_ERR(buf);
3008 iov[0].iov_base = buf;
3009 iov[0].iov_len = len;
3010 return 0;
3011}
3012
3013static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3014 bool needs_lock)
3015{
Jens Axboedddb3e22020-06-04 11:27:01 -06003016 if (req->flags & REQ_F_BUFFER_SELECTED) {
3017 struct io_buffer *kbuf;
3018
3019 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3020 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3021 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003022 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003023 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00003024 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07003025 return -EINVAL;
3026
3027#ifdef CONFIG_COMPAT
3028 if (req->ctx->compat)
3029 return io_compat_import(req, iov, needs_lock);
3030#endif
3031
3032 return __io_iov_buffer_select(req, iov, needs_lock);
3033}
3034
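/*
 * Set up @iter for a read/write request. Three cases: the fixed-buffer
 * opcodes map the registered buffer directly, IORING_OP_READ/WRITE treat
 * addr/len as a single range (optionally selecting a provided buffer
 * first), and everything else imports a full iovec array from userspace
 * (honouring buffer select for single-iovec requests).
 */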
Pavel Begunkov847595d2021-02-04 13:52:06 +00003035static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3036 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003037{
Jens Axboe9adbd452019-12-20 08:45:55 -07003038 void __user *buf = u64_to_user_ptr(req->rw.addr);
3039 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003040 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003041 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003042
Pavel Begunkov7d009162019-11-25 23:14:40 +03003043 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003044 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003045 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003046 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003047
Jens Axboebcda7ba2020-02-23 16:42:51 -07003048 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003049 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003050 return -EINVAL;
3051
Jens Axboe3a6820f2019-12-22 15:19:35 -07003052 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003053 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003054 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003055 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003056 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003057 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003058 }
3059
Jens Axboe3a6820f2019-12-22 15:19:35 -07003060 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3061 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003062 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003063 }
3064
Jens Axboe4d954c22020-02-27 07:31:19 -07003065 if (req->flags & REQ_F_BUFFER_SELECT) {
3066 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003067 if (!ret)
3068 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003069 *iovec = NULL;
3070 return ret;
3071 }
3072
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003073 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3074 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003075}
3076
Jens Axboe0fef9482020-08-26 10:36:20 -06003077static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3078{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003079 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003080}
3081
Jens Axboe32960612019-09-23 11:05:34 -06003082/*
3083 * For files that don't have ->read_iter() and ->write_iter(), handle them
3084 * by looping over ->read() or ->write() manually.
3085 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003086static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003087{
Jens Axboe4017eb92020-10-22 14:14:12 -06003088 struct kiocb *kiocb = &req->rw.kiocb;
3089 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003090 ssize_t ret = 0;
3091
3092 /*
3093 * Don't support polled IO through this interface, and we can't
3094 * support non-blocking either. For the latter, this just causes
3095 * the kiocb to be handled from an async context.
3096 */
3097 if (kiocb->ki_flags & IOCB_HIPRI)
3098 return -EOPNOTSUPP;
3099 if (kiocb->ki_flags & IOCB_NOWAIT)
3100 return -EAGAIN;
3101
3102 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003103 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003104 ssize_t nr;
3105
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003106 if (!iov_iter_is_bvec(iter)) {
3107 iovec = iov_iter_iovec(iter);
3108 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003109 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3110 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003111 }
3112
Jens Axboe32960612019-09-23 11:05:34 -06003113 if (rw == READ) {
3114 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003115 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003116 } else {
3117 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003118 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003119 }
3120
3121 if (nr < 0) {
3122 if (!ret)
3123 ret = nr;
3124 break;
3125 }
3126 ret += nr;
3127 if (nr != iovec.iov_len)
3128 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003129 req->rw.len -= nr;
3130 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003131 iov_iter_advance(iter, nr);
3132 }
3133
3134 return ret;
3135}
3136
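/*
 * Stash the import state in the request's async data so the I/O can be
 * retried later: copy the iovec iterator, take ownership of any
 * heap-allocated iovec (marking REQ_F_NEED_CLEANUP so it gets freed), and
 * if the iterator points at the caller's on-stack fast_iov, repoint it at
 * the persistent copy embedded in struct io_async_rw.
 */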
Jens Axboeff6165b2020-08-13 09:47:43 -06003137static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3138 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003139{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003140 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003141
Jens Axboeff6165b2020-08-13 09:47:43 -06003142 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003143 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003144 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003145 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003146 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003147 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003148 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003149 unsigned iov_off = 0;
3150
3151 rw->iter.iov = rw->fast_iov;
3152 if (iter->iov != fast_iov) {
3153 iov_off = iter->iov - fast_iov;
3154 rw->iter.iov += iov_off;
3155 }
3156 if (rw->fast_iov != fast_iov)
3157 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003158 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003159 } else {
3160 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003161 }
3162}
3163
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003164static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003165{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003166 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3167 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3168 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003169}
3170
Jens Axboeff6165b2020-08-13 09:47:43 -06003171static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3172 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003173 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003174{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003175 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003176 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003177 if (!req->async_data) {
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003178 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003179 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003180 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003181 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003182
Jens Axboeff6165b2020-08-13 09:47:43 -06003183 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003184 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003185 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003186}
3187
Pavel Begunkov73debe62020-09-30 22:57:54 +03003188static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003189{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003190 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003191 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003192 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003193
Pavel Begunkov2846c482020-11-07 13:16:27 +00003194 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003195 if (unlikely(ret < 0))
3196 return ret;
3197
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003198 iorw->bytes_done = 0;
3199 iorw->free_iovec = iov;
3200 if (iov)
3201 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003202 return 0;
3203}
3204
Pavel Begunkov73debe62020-09-30 22:57:54 +03003205static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003206{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003207 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3208 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003209 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003210}
3211
Jens Axboec1dd91d2020-08-03 16:43:59 -06003212/*
3213 * This is our waitqueue callback handler, registered through lock_page_async()
3214 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3215 * This gets called when the page is unlocked, and we generally expect that to
3216 * happen when the page IO is completed and the page is now uptodate. This will
3217 * queue a task_work based retry of the operation, attempting to copy the data
3218 * again. If the latter fails because the page was NOT uptodate, then we will
3219 * do a thread based blocking retry of the operation. That's the unexpected
3220 * slow path.
3221 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003222static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3223 int sync, void *arg)
3224{
3225 struct wait_page_queue *wpq;
3226 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003227 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003228
3229 wpq = container_of(wait, struct wait_page_queue, wait);
3230
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003231 if (!wake_page_match(wpq, key))
3232 return 0;
3233
Hao Xuc8d317a2020-09-29 20:00:45 +08003234 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003235 list_del_init(&wait->entry);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003236 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003237 return 1;
3238}
3239
Jens Axboec1dd91d2020-08-03 16:43:59 -06003240/*
3241 * This controls whether a given IO request should be armed for async page
3242 * based retry. If we return false here, the request is handed to the async
3243 * worker threads for retry. If we're doing buffered reads on a regular file,
3244 * we prepare a private wait_page_queue entry and retry the operation. This
3245 * will either succeed because the page is now uptodate and unlocked, or it
3246 * will register a callback when the page is unlocked at IO completion. Through
3247 * that callback, io_uring uses task_work to setup a retry of the operation.
3248 * That retry will attempt the buffered read again. The retry will generally
3249 * succeed, or in rare cases where it fails, we then fall back to using the
3250 * async worker threads for a blocking retry.
3251 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003252static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003253{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003254 struct io_async_rw *rw = req->async_data;
3255 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003256 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003257
3258 /* never retry for NOWAIT, we just complete with -EAGAIN */
3259 if (req->flags & REQ_F_NOWAIT)
3260 return false;
3261
Jens Axboe227c0c92020-08-13 11:51:40 -06003262 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003263 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003264 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003265
Jens Axboebcf5a062020-05-22 09:24:42 -06003266 /*
3267	 * just use poll if we can, and don't attempt it if the fs doesn't
3268	 * support callback-based unlocks
3269 */
3270 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3271 return false;
3272
Jens Axboe3b2a4432020-08-16 10:58:43 -07003273 wait->wait.func = io_async_buf_func;
3274 wait->wait.private = req;
3275 wait->wait.flags = 0;
3276 INIT_LIST_HEAD(&wait->wait.entry);
3277 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003278 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003279 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003280 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003281}
3282
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003283static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003284{
3285 if (req->file->f_op->read_iter)
3286 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003287 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003288 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003289 else
3290 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003291}
3292
Pavel Begunkov889fca72021-02-10 00:03:09 +00003293static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003294{
3295 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003296 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003297 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003298 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003299 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003300 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003301
Pavel Begunkov2846c482020-11-07 13:16:27 +00003302 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003303 iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_nowait(req, READ)) {
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
		return ret ?: -EAGAIN;
	}

	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(req, iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		goto out_free;
	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	rw = req->async_data;
	/* now use our persistent iterator, if we aren't already */
	iter = &rw->iter;

	do {
		io_size -= ret;
		rw->bytes_done += ret;
		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(req, iter);
		if (ret == -EIOCBQUEUED)
			return 0;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
	} while (ret > 0 && ret < io_size);
done:
	kiocb_done(kiocb, ret, issue_flags);
out_free:
	/* it's faster to check here than delegate to kfree */
	if (iovec)
		kfree(iovec);
	return 0;
}
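
/*
 * Usage sketch (userspace, not kernel code): the simplest consumer of the
 * buffered read path above is a single IORING_OP_READ driven through
 * liburing. A minimal, hedged example — file name is a placeholder and
 * error handling is omitted; the IOCB_WAITQ retries above are invisible
 * to the application, which only sees the final CQE:
 *
 *	#include <fcntl.h>
 *	#include <liburing.h>
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	char buf[4096];
 *	int fd = open("data.bin", O_RDONLY);
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	// cqe->res is bytes read, or -errno
 *	io_uring_cqe_seen(&ring, cqe);
 */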

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;
	return io_prep_rw(req, sqe);
}

static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t ret, ret2, io_size;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_nowait(req, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (req->file->f_op->write_iter)
		ret2 = call_write_iter(req->file, kiocb, iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
			goto copy_iov;
done:
		kiocb_done(kiocb, ret2, issue_flags);
	} else {
copy_iov:
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
		return ret ?: -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}
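
/*
 * Note on the freeze protection above: it is taken at submission time but
 * dropped on the completion side via io_complete_rw(), possibly on a
 * different thread — hence the open-coding and the lockdep release. From
 * userspace the write path looks like any other SQE; a hedged liburing
 * sketch (setup and error handling omitted):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);	// offset 0
 *	io_uring_submit(&ring);
 */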

static int io_renameat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = &req->rename;
	const char __user *oldf, *newf;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rename *ren = &req->rename;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
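
/*
 * Usage sketch: IORING_OP_RENAMEAT from userspace, assuming a liburing
 * version that provides io_uring_prep_renameat(). Path names are
 * placeholders; the op always punts to io-wq, as the -EAGAIN above shows:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt",
 *			       AT_FDCWD, "new.txt", 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	// cqe->res: 0 or -errno
 *	io_uring_cqe_seen(&ring, cqe);
 */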

static int io_unlinkat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = &req->unlink;
	const char __user *fname;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_unlink *un = &req->unlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
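
/*
 * Usage sketch: IORING_OP_UNLINKAT via liburing (hedged; path is a
 * placeholder). Passing AT_REMOVEDIR in the flags turns this into rmdir,
 * matching the branch above:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_unlinkat(sqe, AT_FDCWD, "tmpfile", 0);
 *	io_uring_submit(&ring);
 */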

static int io_shutdown_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
	    sqe->buf_index)
		return -EINVAL;

	req->shutdown.how = READ_ONCE(sqe->len);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, req->shutdown.how);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
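
/*
 * Usage sketch: IORING_OP_SHUTDOWN is shutdown(2) in SQE form, valid only
 * on sockets (-ENOTSOCK otherwise, as above). Hedged liburing example
 * half-closing the write side:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 */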

static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->file_in = NULL;
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
				  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (!sp->file_in)
		return -EBADF;
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
		io_put_file(in);
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
		io_put_file(in);
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
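
/*
 * Usage sketch: IORING_OP_SPLICE mirrors splice(2); an offset of -1 means
 * "use the file's current position", which is what the poff_in/poff_out
 * logic above implements. Hedged liburing example moving 4 KiB from a
 * regular file (offset 0) into the write end of a pipe:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_splice(sqe, file_fd, 0, pipe_wr_fd, -1, 4096, 0);
 *	io_uring_submit(&ring);
 */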

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, issue_flags, 0, 0);
	return 0;
}

static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
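
/*
 * Usage sketch: IORING_OP_FSYNC always punts to a blocking context, so it
 * behaves like an asynchronous fsync(2)/fdatasync(2). Hedged liburing
 * example (pass 0 instead of IORING_FSYNC_DATASYNC for a full fsync):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */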

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
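
/*
 * Usage sketch: note the unusual SQE mapping in the prep helper above —
 * the fallocate length travels in sqe->addr and the mode in sqe->len.
 * liburing hides that detail; hedged example preallocating 1 MiB:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1024 * 1024);
 *	io_uring_submit(&ring);
 */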

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}
	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct open_flags op;
	struct file *file;
	bool nonblock_set;
	bool resolve_nonblock;
	int ret;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always return -EAGAIN
		 */
		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;
	fsnotify_open(file);
	fd_install(ret, file);
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}
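
/*
 * Usage sketch: IORING_OP_OPENAT2 takes a struct open_how; the LOOKUP_CACHED
 * trick above means the first, nonblocking attempt only succeeds if the
 * whole path is in the dcache, otherwise it retries from io-wq. Hedged
 * liburing example (path is a placeholder):
 *
 *	struct open_how how = { .flags = O_RDONLY };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "config.txt", &how);
 *	io_uring_submit(&ring);
 *	// the CQE res is the new fd on success, or -errno
 */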

static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
	}
	i++;
	kfree(buf);
	xa_erase(&ctx->io_buffers, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = xa_load(&ctx->io_buffers, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
	if (ret < 0)
		req_set_fail(req);

	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, !force_nonblock);
	return 0;
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}

static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = xa_load(&ctx->io_buffers, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret >= 0 && !list) {
		ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
		if (ret < 0)
			__io_remove_buffers(ctx, head, p->bgid, -1U);
	}
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, !force_nonblock);
	return 0;
}
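
/*
 * Usage sketch: buffer groups registered here pair with IOSQE_BUFFER_SELECT
 * on later reads/receives; the kernel picks one buffer from the group and
 * reports its bid in the CQE flags. Hedged liburing example registering
 * eight 4 KiB buffers as group 1, starting at bid 0:
 *
 *	char *pool = malloc(8 * 4096);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 */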

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
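
/*
 * Usage sketch: IORING_OP_EPOLL_CTL is epoll_ctl(2) in SQE form, handy for
 * updating an epoll set without leaving the ring. Hedged example, assuming
 * a liburing version providing io_uring_prep_epoll_ctl():
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_epoll_ctl(sqe, epfd, sockfd, EPOLL_CTL_ADD, &ev);
 *	io_uring_submit(&ring);
 */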

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
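
/*
 * Usage sketch: per the switch above, only NORMAL/RANDOM/SEQUENTIAL advice
 * is served inline in the nonblocking path; anything else punts to io-wq.
 * Hedged liburing example (len 0 means "to end of file", as with the
 * syscall):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fadvise(sqe, fd, 0, 0, POSIX_FADV_SEQUENTIAL);
 *	io_uring_submit(&ring);
 */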

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->statx.dfd = READ_ONCE(sqe->fd);
	req->statx.mask = READ_ONCE(sqe->len);
	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->statx.flags = READ_ONCE(sqe->statx_flags);

	return 0;
}

static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_statx *ctx = &req->statx;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
			ctx->buffer);

	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
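
/*
 * Usage sketch: IORING_OP_STATX always runs from a blocking context, per
 * the -EAGAIN above. Hedged liburing example requesting basic stats (path
 * is a placeholder; the statx buffer must stay live until completion):
 *
 *	struct statx stx;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_statx(sqe, AT_FDCWD, "data.bin", 0,
 *			    STATX_BASIC_STATS, &stx);
 *	io_uring_submit(&ring);
 */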

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	return 0;
}

static int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = &req->close;
	struct fdtable *fdt;
	struct file *file = NULL;
	int ret = -EBADF;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = fdt->fd[close->fd];
	if (!file || file->f_op == &io_uring_fops) {
		spin_unlock(&files->file_lock);
		file = NULL;
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	ret = __close_fd_get_file(close->fd, &file);
	spin_unlock(&files->file_lock);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = -EBADF;
		goto err;
	}

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	if (file)
		fput(file);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
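
/*
 * Usage sketch: IORING_OP_CLOSE; files with a ->flush() op take the slow
 * io-wq path, and io_uring fds themselves are rejected above. Hedged
 * liburing example:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_close(sqe, fd);
 *	io_uring_submit(&ring);
 */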

static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* sync_file_range always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}
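
/*
 * Usage sketch: IORING_OP_SYNC_FILE_RANGE, an asynchronous
 * sync_file_range(2). Hedged liburing example starting writeback of the
 * first 64 KiB (note liburing's helper takes len before offset):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_sync_file_range(sqe, fd, 65536, 0,
 *				      SYNC_FILE_RANGE_WRITE);
 *	io_uring_submit(&ring);
 */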

#if defined(CONFIG_NET)
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	if (io_alloc_async_data(req)) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	async_msg = req->async_data;
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
				   req->sr_msg.msg_flags, &iomsg->free_iov);
}

static int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = &req->sr_msg;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	flags = req->sr_msg.msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < min_ret)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	flags = req->sr_msg.msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (ret < min_ret)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
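
/*
 * Usage sketch: IORING_OP_SEND is the single-buffer fast path above (no
 * msghdr copy); with MSG_WAITALL the request only counts as successful
 * once the whole iterator drains (the min_ret logic). Hedged liburing
 * example:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send(sqe, sockfd, buf, len, MSG_NOSIGNAL);
 *	io_uring_submit(&ring);
 */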

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
					&iomsg->uaddr, &uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len > 1)
			return -EINVAL;
		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
			return -EFAULT;
		sr->len = iomsg->fast_iov[0].iov_len;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
				  &ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
					       bool needs_lock)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_buffer *kbuf;

	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;

	sr->kbuf = kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return kbuf;
}

static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
{
	return io_put_kbuf(req, req->sr_msg.kbuf);
}

static int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = &req->sr_msg;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->bgid = READ_ONCE(sqe->buf_group);
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}
4613
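/*
 * Issue a recvmsg. The msghdr is copied in on the first pass (or reused
 * from async_data on retry), a provided buffer is selected if requested,
 * and -EAGAIN in nonblocking mode arms async retry via
 * io_setup_async_msg().
 */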
static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	struct io_buffer *kbuf;
	unsigned flags;
	int min_ret = 0;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		kmsg->fast_iov[0].iov_len = req->sr_msg.len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
				1, req->sr_msg.len);
	}

	flags = req->sr_msg.msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
				 kmsg->uaddr, flags);
	if (force_nonblock && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}

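/*
 * Issue a plain recv: like io_recvmsg() but for a single buffer, so the
 * msghdr is built on the stack instead of being copied from user space.
 */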
static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_buffer *kbuf;
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	void __user *buf = sr->buf;
	struct socket *sock;
	struct iovec iov;
	unsigned flags;
	int min_ret = 0;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		buf = u64_to_user_ptr(kbuf->addr);
	}

	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_iocb = NULL;
	msg.msg_flags = 0;

	flags = req->sr_msg.msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out_free:
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

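/*
 * Issue an accept4(). -EAGAIN in nonblocking mode is passed back so the
 * core can retry the request from poll or io-wq context.
 */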
static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = &req->accept;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	int ret;

	if (req->file->f_flags & O_NONBLOCK)
		req->flags |= REQ_F_NOWAIT;

	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags,
					accept->nofile);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret < 0) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = &req->connect;

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = &req->connect;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

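/*
 * Issue a connect(). If the socket address hasn't been copied into
 * async_data yet, it lives on the stack; when the attempt would block,
 * it is moved into freshly allocated async_data before returning
 * -EAGAIN so the retry sees the same address.
 */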
static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req->async_data) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(req->connect.addr,
						req->connect.addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					req->connect.addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req->async_data)
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
#else /* !CONFIG_NET */
#define IO_NETOP_FN(op)							\
static int io_##op(struct io_kiocb *req, unsigned int issue_flags)	\
{									\
	return -EOPNOTSUPP;						\
}

#define IO_NETOP_PREP(op)						\
IO_NETOP_FN(op)								\
static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
{									\
	return -EOPNOTSUPP;						\
}									\

#define IO_NETOP_PREP_ASYNC(op)						\
IO_NETOP_PREP(op)							\
static int io_##op##_prep_async(struct io_kiocb *req)			\
{									\
	return -EOPNOTSUPP;						\
}

IO_NETOP_PREP_ASYNC(sendmsg);
IO_NETOP_PREP_ASYNC(recvmsg);
IO_NETOP_PREP_ASYNC(connect);
IO_NETOP_PREP(accept);
IO_NETOP_FN(send);
IO_NETOP_FN(recv);
#endif /* CONFIG_NET */

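/*
 * Bookkeeping passed to vfs_poll() while arming a poll handler: tracks
 * the owning request, how many wait queue entries were added, and any
 * setup error.
 */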
struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
};

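/*
 * Common wakeup path for poll and async-poll: on an event match, detach
 * from the wait queue, record the result mask, and punt the rest of the
 * handling to task context via task_work.
 */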
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
			   __poll_t mask, io_req_tw_func_t func)
{
	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);

	list_del_init(&poll->wait.entry);

	req->result = mask;
	req->io_task_work.func = func;

	/*
	 * If this fails, then the task is exiting. When a task exits, the
	 * work gets canceled, so just cancel this request as well instead
	 * of executing it. We can't safely execute it anyway, as we may not
	 * have the state needed for it.
	 */
	io_req_task_work_add(req);
	return 1;
}

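/*
 * Re-check for events from task_work context. If nothing is pending and
 * the poll wasn't canceled, re-add to the wait queue and return true so
 * the caller bails out; either way the completion lock is held on return.
 */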
static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
	__acquires(&req->ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(req->task->flags & PF_EXITING))
		WRITE_ONCE(poll->canceled, true);

	if (!req->result && !READ_ONCE(poll->canceled)) {
		struct poll_table_struct pt = { ._key = poll->events };

		req->result = vfs_poll(req->file, &pt) & poll->events;
	}

	spin_lock(&ctx->completion_lock);
	if (!req->result && !READ_ONCE(poll->canceled)) {
		add_wait_queue(poll->head, &poll->wait);
		return true;
	}

	return false;
}

static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return &req->poll;
	return &req->apoll->poll;
}

static void io_poll_remove_double(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_poll_iocb *poll = io_poll_get_double(req);

	lockdep_assert_held(&req->ctx->completion_lock);

	if (poll && poll->head) {
		struct wait_queue_head *head = poll->head;

		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		if (poll->wait.private)
			req_ref_put(req);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

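/*
 * Post a CQE for a completed poll with the given mask. Returns true if
 * the request is done, i.e. the posted completion did not carry
 * IORING_CQE_F_MORE (one-shot, canceled, or the CQE couldn't be filled).
 */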
static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned flags = IORING_CQE_F_MORE;
	int error;

	if (READ_ONCE(req->poll.canceled)) {
		error = -ECANCELED;
		req->poll.events |= EPOLLONESHOT;
	} else {
		error = mangle_poll(mask);
	}
	if (req->poll.events & EPOLLONESHOT)
		flags = 0;
	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
		req->poll.done = true;
		flags = 0;
	}
	if (flags & IORING_CQE_F_MORE)
		ctx->cq_extra++;

	io_commit_cqring(ctx);
	return !(flags & IORING_CQE_F_MORE);
}

static void io_poll_task_func(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt;

	if (io_poll_rewait(req, &req->poll)) {
		spin_unlock(&ctx->completion_lock);
	} else {
		bool done;

		done = io_poll_complete(req, req->result);
		if (done) {
			io_poll_remove_double(req);
			hash_del(&req->hash_node);
		} else {
			req->result = 0;
			add_wait_queue(req->poll.head, &req->poll.wait);
		}
		spin_unlock(&ctx->completion_lock);
		io_cqring_ev_posted(ctx);

		if (done) {
			nxt = io_put_req_find_next(req);
			if (nxt)
				io_req_task_submit(nxt);
		}
	}
}

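/*
 * Wakeup for the second (double) poll entry. One-shot requests are
 * detached from this waitqueue here; the actual completion still runs
 * through the primary entry's wait function.
 */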
static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
			       int sync, void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = io_poll_get_single(req);
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;
	if (!(poll->events & EPOLLONESHOT))
		return poll->wait.func(&poll->wait, mode, sync, key);

	list_del_init(&wait->entry);

	if (poll->head) {
		bool done;

		spin_lock_irqsave(&poll->head->lock, flags);
		done = list_empty(&poll->wait.entry);
		if (!done)
			list_del_init(&poll->wait.entry);
		/* make sure double remove sees this as being gone */
		wait->private = NULL;
		spin_unlock_irqrestore(&poll->head->lock, flags);
		if (!done) {
			/* use wait func handler, so it matches the rq type */
			poll->wait.func(&poll->wait, mode, sync, key);
		}
	}
	req_ref_put(req);
	return 1;
}

static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
	poll->done = false;
	poll->canceled = false;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

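/*
 * Called by vfs_poll() for each waitqueue the file wants us on. The
 * first entry uses the embedded io_poll_iocb; a second waitqueue gets a
 * dynamically allocated "double" entry, and a third is rejected.
 */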
static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll_iocb **poll_ptr)
{
	struct io_kiocb *req = pt->req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll_iocb
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll_iocb *poll_one = poll;

		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			pt->error = -EINVAL;
			return;
		}
		/*
		 * Can't handle multishot for double wait for now, turn it
		 * into one-shot mode.
		 */
		if (!(poll_one->events & EPOLLONESHOT))
			poll_one->events |= EPOLLONESHOT;
		/* double add on the same waitqueue head, ignore */
		if (poll_one->head == head)
			return;
		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
		req_ref_get(req);
		poll->wait.private = req;
		*poll_ptr = poll;
	}

	pt->nr_entries++;
	poll->head = head;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

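/*
 * task_work for an internal (async) poll: if the event still stands,
 * re-issue the original request; if the poll was canceled, fail the
 * request with -ECANCELED.
 */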
static void io_async_task_func(struct io_kiocb *req)
{
	struct async_poll *apoll = req->apoll;
	struct io_ring_ctx *ctx = req->ctx;

	trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);

	if (io_poll_rewait(req, &apoll->poll)) {
		spin_unlock(&ctx->completion_lock);
		return;
	}

	hash_del(&req->hash_node);
	io_poll_remove_double(req);
	spin_unlock(&ctx->completion_lock);

	if (!READ_ONCE(apoll->poll.canceled))
		io_req_task_submit(req);
	else
		io_req_complete_failed(req, -ECANCELED);
}

static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = &req->apoll->poll;

	trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
					key_to_poll(key));

	return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct hlist_head *list;

	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
	hlist_add_head(&req->hash_node, list);
}

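/*
 * Arm a poll handler for the request: do the initial vfs_poll() with
 * our queue proc so the wait entries get added, then decide under the
 * completion lock whether the request completed inline (nonzero mask
 * returned), is now waiting (inserted into the cancel hash), or failed.
 */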
static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
				      struct io_poll_iocb *poll,
				      struct io_poll_table *ipt, __poll_t mask,
				      wait_queue_func_t wake_func)
	__acquires(&ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool cancel = false;

	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask, wake_func);
	poll->file = req->file;
	poll->wait.private = req;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
	if (unlikely(!ipt->nr_entries) && !ipt->error)
		ipt->error = -EINVAL;

	spin_lock(&ctx->completion_lock);
	if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
		io_poll_remove_double(req);
	if (likely(poll->head)) {
		spin_lock_irq(&poll->head->lock);
		if (unlikely(list_empty(&poll->wait.entry))) {
			if (ipt->error)
				cancel = true;
			ipt->error = 0;
			mask = 0;
		}
		if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
			list_del_init(&poll->wait.entry);
		else if (cancel)
			WRITE_ONCE(poll->canceled, true);
		else if (!poll->done) /* actually waiting for an event */
			io_poll_req_insert(req);
		spin_unlock_irq(&poll->head->lock);
	}

	return mask;
}

enum {
	IO_APOLL_OK,
	IO_APOLL_ABORTED,
	IO_APOLL_READY
};

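/*
 * Internal poll used to drive retries of requests that returned -EAGAIN
 * on a pollable file. Returns IO_APOLL_OK if the poll was armed,
 * IO_APOLL_READY if the file is already ready (retry the request now),
 * or IO_APOLL_ABORTED if arming isn't possible.
 */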
static int io_arm_poll_handler(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
	int rw;

	if (!req->file || !file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if (req->flags & REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;

	if (def->pollin) {
		rw = READ;
		mask |= POLLIN | POLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if ((req->opcode == IORING_OP_RECVMSG) &&
		    (req->sr_msg.msg_flags & MSG_ERRQUEUE))
			mask &= ~POLLIN;
	} else {
		rw = WRITE;
		mask |= POLLOUT | POLLWRNORM;
	}

	/* if we can't nonblock try, then no point in arming a poll handler */
	if (!io_file_supports_nowait(req, rw))
		return IO_APOLL_ABORTED;

	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
	if (unlikely(!apoll))
		return IO_APOLL_ABORTED;
	apoll->double_poll = NULL;
	req->apoll = apoll;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;
	io_req_set_refcount(req);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
					io_async_wake);
	spin_unlock(&ctx->completion_lock);
	if (ret || ipt.error)
		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;

	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
				mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static bool __io_poll_remove_one(struct io_kiocb *req,
				 struct io_poll_iocb *poll, bool do_cancel)
	__must_hold(&req->ctx->completion_lock)
{
	bool do_complete = false;

	if (!poll->head)
		return false;
	spin_lock_irq(&poll->head->lock);
	if (do_cancel)
		WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		do_complete = true;
	}
	spin_unlock_irq(&poll->head->lock);
	hash_del(&req->hash_node);
	return do_complete;
}

static bool io_poll_remove_one(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	bool do_complete;

	io_poll_remove_double(req);
	do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);

	if (do_complete) {
		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
		io_commit_cqring(req->ctx);
		req_set_fail(req);
		io_put_req_deferred(req);
	}
	return do_complete;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int posted = 0, i;

	spin_lock(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list;

		list = &ctx->cancel_hash[i];
		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
			if (io_match_task(req, tsk, cancel_all))
				posted += io_poll_remove_one(req);
		}
	}
	spin_unlock(&ctx->completion_lock);

	if (posted)
		io_cqring_ev_posted(ctx);

	return posted != 0;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
				     bool poll_only)
	__must_hold(&ctx->completion_lock)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr != req->user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		return req;
	}
	return NULL;
}

static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
			  bool poll_only)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	req = io_poll_find(ctx, sqe_addr, poll_only);
	if (!req)
		return -ENOENT;
	if (io_poll_remove_one(req))
		return 0;

	return -EALREADY;
}

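/*
 * Translate the poll mask from the SQE into kernel EPOLL* bits, fixing
 * up endianness and forcing EPOLLONESHOT unless multishot was requested.
 */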
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
}

static int io_poll_update_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = &req->poll_update;
	u32 flags;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = &req->poll;

	return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);

	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
}

static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_iocb *poll = &req->poll;
	u32 flags;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;

	io_req_set_refcount(req);
	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

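/*
 * Issue IORING_OP_POLL_ADD. If the file is already ready, the mask is
 * "stolen" and the request completes inline; otherwise it stays parked
 * on the file's waitqueue until io_poll_wake() fires.
 */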
static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_iocb *poll = &req->poll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_poll_table ipt;
	__poll_t mask;

	ipt.pt._qproc = io_poll_queue_proc;

	mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
					io_poll_wake);

	if (mask) { /* no async, we'd stolen it */
		ipt.error = 0;
		io_poll_complete(req, mask);
	}
	spin_unlock(&ctx->completion_lock);

	if (mask) {
		io_cqring_ev_posted(ctx);
		if (poll->events & EPOLLONESHOT)
			io_put_req(req);
	}
	return ipt.error;
}

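/*
 * Issue a poll update: find the target poll request by user_data, then
 * either cancel it outright or detach it, patch its events and/or
 * user_data, and re-arm it with io_poll_add().
 */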
static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *preq;
	bool completing;
	int ret;

	spin_lock(&ctx->completion_lock);
	preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
	if (!preq) {
		ret = -ENOENT;
		goto err;
	}

	if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
		completing = true;
		ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
		goto err;
	}

	/*
	 * Don't allow racy completion with singleshot, as we cannot safely
	 * update those. For multishot, if we're racing with completion, just
	 * let completion re-add it.
	 */
	completing = !__io_poll_remove_one(preq, &preq->poll, false);
	if (completing && (preq->poll.events & EPOLLONESHOT)) {
		ret = -EALREADY;
		goto err;
	}
	/* we now have a detached poll request. reissue. */
	ret = 0;
err:
	if (ret < 0) {
		spin_unlock(&ctx->completion_lock);
		req_set_fail(req);
		io_req_complete(req, ret);
		return 0;
	}
	/* only replace the low event mask bits, keep behavior flags */
	if (req->poll_update.update_events) {
		preq->poll.events &= ~0xffff;
		preq->poll.events |= req->poll_update.events & 0xffff;
		preq->poll.events |= IO_POLL_UNMASK;
	}
	if (req->poll_update.update_user_data)
		preq->user_data = req->poll_update.new_user_data;
	spin_unlock(&ctx->completion_lock);

	/* complete update request, we're done with it */
	io_req_complete(req, ret);

	if (!completing) {
		ret = io_poll_add(preq, issue_flags);
		if (ret < 0) {
			req_set_fail(preq);
			io_req_complete(preq, ret);
		}
	}
	return 0;
}

static void io_req_task_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock(&ctx->completion_lock);
	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	req_set_fail(req);
	io_put_req(req);
}

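/*
 * hrtimer callback for a timeout request: runs in irq context, so it
 * only unlinks the timeout and punts the CQE posting to task_work.
 */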
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&req->timeout.list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   __u64 user_data)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_kiocb *req;
	bool found = false;

	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
		found = user_data == req->user_data;
		if (found)
			break;
	}
	if (!found)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	list_del_init(&req->timeout.list);
	return req;
}

static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
	__must_hold(&ctx->timeout_lock)
{
	struct io_kiocb *req = io_timeout_extract(ctx, user_data);

	if (IS_ERR(req))
		return PTR_ERR(req);

	req_set_fail(req);
	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
	io_put_req_deferred(req);
	return 0;
}

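/*
 * Replace an armed timeout's expiry: pull it off the timeout list,
 * clear its sequence offset, and restart the hrtimer with the new
 * timespec and mode.
 */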
static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout.off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&req->timeout.list, &ctx->timeout_list);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_remove_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = &req->timeout_rem;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len)
		return -EINVAL;

	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE) {
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = &req->timeout_rem;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->timeout_lock);
	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
		ret = io_timeout_cancel(ctx, tr->addr);
	else
		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
					io_translate_timeout_mode(tr->flags));
	spin_unlock_irq(&ctx->timeout_lock);

	spin_lock(&ctx->completion_lock);
	io_cqring_fill_event(ctx, req->user_data, ret, 0);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
	if (ret < 0)
		req_set_fail(req);
	io_put_req(req);
	return 0;
}

Jens Axboe3529d8c2019-12-19 18:24:38 -07005671static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005672 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005673{
Jens Axboead8a48a2019-11-15 08:49:11 -07005674 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005675 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005676 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005677
Jens Axboead8a48a2019-11-15 08:49:11 -07005678 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005679 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005680 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005681 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005682 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005683 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005684 flags = READ_ONCE(sqe->timeout_flags);
5685 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005686 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005687
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005688 req->timeout.off = off;
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01005689 if (unlikely(off && !req->ctx->off_timeout_used))
5690 req->ctx->off_timeout_used = true;
Jens Axboe26a61672019-12-20 09:02:01 -07005691
Jens Axboee8c2bc12020-08-15 18:44:09 -07005692 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005693 return -ENOMEM;
5694
Jens Axboee8c2bc12020-08-15 18:44:09 -07005695 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005696 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005697
5698 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005699 return -EFAULT;
5700
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005701 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005702 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5703 return 0;
5704}
5705
Pavel Begunkov61e98202021-02-10 00:03:08 +00005706static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005707{
Jens Axboead8a48a2019-11-15 08:49:11 -07005708 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005709 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005710 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005711 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005712
Jens Axboe89850fc2021-08-10 15:11:51 -06005713 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005714
Jens Axboe5262f562019-09-17 12:26:57 -06005715 /*
5716	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005717 * timeout event to be satisfied. If it isn't set, then this is
5718	 * a pure timeout request; the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005719 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005720 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005721 entry = ctx->timeout_list.prev;
5722 goto add;
5723 }
Jens Axboe5262f562019-09-17 12:26:57 -06005724
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005725 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5726 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005727
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005728 /* Update the last seq here in case io_flush_timeouts() hasn't.
5729	 * This is safe because ->timeout_lock is held, and submissions
5730	 * and completions are never mixed in the same ->timeout_lock section.
5731 */
5732 ctx->cq_last_tm_flush = tail;
5733
Jens Axboe5262f562019-09-17 12:26:57 -06005734 /*
5735 * Insertion sort, ensuring the first entry in the list is always
5736 * the one we need first.
5737 */
Jens Axboe5262f562019-09-17 12:26:57 -06005738 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005739 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5740 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005741
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005742 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005743 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005744 /* nxt.seq is behind @tail, otherwise would've been completed */
5745 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005746 break;
5747 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005748add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005749 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005750 data->timer.function = io_timeout_fn;
5751 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe89850fc2021-08-10 15:11:51 -06005752 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005753 return 0;
5754}
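/*
 * Worked example for the sequence math above: if cached_cq_tail is 10
 * and cq_timeouts is 2 when a timeout with off == 3 is queued, then
 * tail == 8 and target_seq == 11.  The request completes once three
 * more non-timeout completions have been posted, or when the hrtimer
 * expires, whichever comes first.  Keeping ->timeout_list ordered so
 * the soonest target comes first lets the flush side stop scanning at
 * the first entry whose target hasn't been reached yet.
 */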
5755
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005756struct io_cancel_data {
5757 struct io_ring_ctx *ctx;
5758 u64 user_data;
5759};
5760
Jens Axboe62755e32019-10-28 21:49:21 -06005761static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005762{
Jens Axboe62755e32019-10-28 21:49:21 -06005763 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005764 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005765
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005766 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005767}
5768
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005769static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5770 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005771{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005772 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005773 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005774 int ret = 0;
5775
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005776 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005777 return -ENOENT;
5778
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005779 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005780 switch (cancel_ret) {
5781 case IO_WQ_CANCEL_OK:
5782 ret = 0;
5783 break;
5784 case IO_WQ_CANCEL_RUNNING:
5785 ret = -EALREADY;
5786 break;
5787 case IO_WQ_CANCEL_NOTFOUND:
5788 ret = -ENOENT;
5789 break;
5790 }
5791
Jens Axboee977d6d2019-11-05 12:39:45 -07005792 return ret;
5793}
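/*
 * In short, io_async_cancel_one() folds the io-wq verdict into an
 * errno: 0 if the work was found and cancelled, -EALREADY if it is
 * already running and can only complete on its own, and -ENOENT if no
 * request with that user_data exists in the given io-wq (or the task
 * has no io-wq at all).
 */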
5794
Jens Axboe47f46762019-11-09 17:43:02 -07005795static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5796 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005797 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005798{
Jens Axboe47f46762019-11-09 17:43:02 -07005799 int ret;
5800
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005801 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005802 spin_lock(&ctx->completion_lock);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01005803 if (ret != -ENOENT)
5804 goto done;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005805 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07005806 ret = io_timeout_cancel(ctx, sqe_addr);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005807 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07005808 if (ret != -ENOENT)
5809 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005810 ret = io_poll_cancel(ctx, sqe_addr, false);
Jens Axboe47f46762019-11-09 17:43:02 -07005811done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005812 if (!ret)
5813 ret = success_ret;
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005814 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Jens Axboe47f46762019-11-09 17:43:02 -07005815 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005816 spin_unlock(&ctx->completion_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07005817 io_cqring_ev_posted(ctx);
5818
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005819 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005820 req_set_fail(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005821}
5822
Jens Axboe3529d8c2019-12-19 18:24:38 -07005823static int io_async_cancel_prep(struct io_kiocb *req,
5824 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005825{
Jens Axboefbf23842019-12-17 18:45:56 -07005826 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005827 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005828 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5829 return -EINVAL;
5830 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005831 return -EINVAL;
5832
Jens Axboefbf23842019-12-17 18:45:56 -07005833 req->cancel.addr = READ_ONCE(sqe->addr);
5834 return 0;
5835}
5836
Pavel Begunkov61e98202021-02-10 00:03:08 +00005837static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005838{
5839 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005840 u64 sqe_addr = req->cancel.addr;
5841 struct io_tctx_node *node;
5842 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005843
Pavel Begunkov58f99372021-03-12 16:25:55 +00005844 /* tasks should wait for their io-wq threads, so safe w/o sync */
5845 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005846 spin_lock(&ctx->completion_lock);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005847 if (ret != -ENOENT)
5848 goto done;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005849 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005850 ret = io_timeout_cancel(ctx, sqe_addr);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005851 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005852 if (ret != -ENOENT)
5853 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005854 ret = io_poll_cancel(ctx, sqe_addr, false);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005855 if (ret != -ENOENT)
5856 goto done;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005857 spin_unlock(&ctx->completion_lock);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005858
5859 /* slow path, try all io-wq's */
5860 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5861 ret = -ENOENT;
5862 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5863 struct io_uring_task *tctx = node->task->io_uring;
5864
Pavel Begunkov58f99372021-03-12 16:25:55 +00005865 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5866 if (ret != -ENOENT)
5867 break;
5868 }
5869 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5870
Jens Axboe79ebeae2021-08-10 15:18:27 -06005871 spin_lock(&ctx->completion_lock);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005872done:
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005873 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005874 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005875 spin_unlock(&ctx->completion_lock);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005876 io_cqring_ev_posted(ctx);
5877
5878 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005879 req_set_fail(req);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005880 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005881 return 0;
5882}
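/*
 * So cancellation tries progressively wider scopes: the issuing task's
 * own io-wq, then armed timeouts, then poll requests, and finally (the
 * slow path) the io-wq of every task attached to the ring.  A rough
 * userspace sketch, using the raw sqe fields checked by the prep above:
 *
 *	sqe->opcode = IORING_OP_ASYNC_CANCEL;
 *	sqe->addr   = user_data_of_request_to_cancel;
 *
 * The resulting CQE carries 0 on success, -ENOENT if nothing matched,
 * or -EALREADY if the target had already started executing.
 */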
5883
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005884static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005885 const struct io_uring_sqe *sqe)
5886{
Daniele Albano61710e42020-07-18 14:15:16 -06005887 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5888 return -EINVAL;
5889 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005890 return -EINVAL;
5891
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005892 req->rsrc_update.offset = READ_ONCE(sqe->off);
5893 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5894 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005895 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005896 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005897 return 0;
5898}
5899
Pavel Begunkov889fca72021-02-10 00:03:09 +00005900static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005901{
5902 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01005903 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005904 int ret;
5905
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005906 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005907 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005908
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005909 up.offset = req->rsrc_update.offset;
5910 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01005911 up.nr = 0;
5912 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01005913 up.resv = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005914
5915 mutex_lock(&ctx->uring_lock);
Pavel Begunkovfdecb662021-04-25 14:32:20 +01005916 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01005917 &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005918 mutex_unlock(&ctx->uring_lock);
5919
5920 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005921 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005922 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005923 return 0;
5924}
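/*
 * For reference, io_rsrc_update_prep() maps the sqe onto this update:
 * sqe->off is the first slot in the fixed file table to touch,
 * sqe->addr points at an array of new fds, and sqe->len is how many of
 * them to install.  A rough userspace sketch (placeholder names, raw
 * sqe fields only):
 *
 *	int fds[2] = { new_fd0, new_fd1 };
 *
 *	sqe->opcode = IORING_OP_FILES_UPDATE;
 *	sqe->off    = 4;			// start at fixed slot 4
 *	sqe->addr   = (unsigned long)fds;
 *	sqe->len    = 2;
 */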
5925
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005926static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005927{
Jens Axboed625c6e2019-12-17 19:53:05 -07005928 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005929 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005930 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005931 case IORING_OP_READV:
5932 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005933 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005934 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005935 case IORING_OP_WRITEV:
5936 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005937 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005938 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005939 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005940 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005941 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005942 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005943 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005944 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005945 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005946 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005947 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005948 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005949 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005950 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005951 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005952 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005953 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005954 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005955 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005956 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005957 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005958 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005959 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005960 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005961 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005962 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005963 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005964 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005965 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005966 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005967 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005968 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005969 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005970 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005971 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005972 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005973 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005974 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005975 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005976 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005977 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005978 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005979 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005980 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005981 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005982 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005983 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005984 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005985 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005986 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005987 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005988 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005989 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005990 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005991 case IORING_OP_SHUTDOWN:
5992 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005993 case IORING_OP_RENAMEAT:
5994 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005995 case IORING_OP_UNLINKAT:
5996 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005997 }
5998
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005999 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6000 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01006001 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006002}
6003
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006004static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006005{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006006 if (!io_op_defs[req->opcode].needs_async_setup)
6007 return 0;
6008 if (WARN_ON_ONCE(req->async_data))
6009 return -EFAULT;
6010 if (io_alloc_async_data(req))
6011 return -EAGAIN;
6012
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006013 switch (req->opcode) {
6014 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006015 return io_rw_prep_async(req, READ);
6016 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006017 return io_rw_prep_async(req, WRITE);
6018 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006019 return io_sendmsg_prep_async(req);
6020 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006021 return io_recvmsg_prep_async(req);
6022 case IORING_OP_CONNECT:
6023 return io_connect_prep_async(req);
6024 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006025 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6026 req->opcode);
6027 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07006028}
6029
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006030static u32 io_get_sequence(struct io_kiocb *req)
6031{
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006032 u32 seq = req->ctx->cached_sq_head;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006033
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006034 /* need original cached_sq_head, but it was increased for each req */
6035 io_for_each_link(req, req)
6036 seq--;
6037 return seq;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006038}
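/*
 * Put differently: cached_sq_head has already been bumped once for
 * every request in this link, so walking the link and decrementing
 * recovers the SQ head value at which this submission actually
 * started, which is what the deferral sequence check compares against.
 */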
6039
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006040static bool io_drain_req(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006041{
Pavel Begunkov3c199662021-06-15 16:47:57 +01006042 struct io_kiocb *pos;
Jens Axboedef596e2019-01-09 08:59:42 -07006043 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006044 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07006045 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006046 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07006047
Pavel Begunkov3c199662021-06-15 16:47:57 +01006048 /*
6049 * If we need to drain a request in the middle of a link, drain the
6050 * head request and the next request/link after the current link.
6051	 * Since links are executed sequentially, IOSQE_IO_DRAIN is effectively
6052 * maintained for every request of our link.
6053 */
6054 if (ctx->drain_next) {
6055 req->flags |= REQ_F_IO_DRAIN;
6056 ctx->drain_next = false;
6057 }
6058 /* not interested in head, start from the first linked */
6059 io_for_each_link(pos, req->link) {
6060 if (pos->flags & REQ_F_IO_DRAIN) {
6061 ctx->drain_next = true;
6062 req->flags |= REQ_F_IO_DRAIN;
6063 break;
6064 }
6065 }
6066
Jens Axboedef596e2019-01-09 08:59:42 -07006067	/* Still need defer if there is a pending req in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006068 if (likely(list_empty_careful(&ctx->defer_list) &&
Pavel Begunkov10c66902021-06-15 16:47:56 +01006069 !(req->flags & REQ_F_IO_DRAIN))) {
6070 ctx->drain_active = false;
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006071 return false;
Pavel Begunkov10c66902021-06-15 16:47:56 +01006072 }
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006073
6074 seq = io_get_sequence(req);
6075 /* Still a chance to pass the sequence check */
6076 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006077 return false;
Jens Axboedef596e2019-01-09 08:59:42 -07006078
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006079 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006080 if (ret)
Pavel Begunkov1b487732021-07-11 22:41:13 +01006081 goto fail;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006082 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006083 de = kmalloc(sizeof(*de), GFP_KERNEL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006084 if (!de) {
Pavel Begunkov1b487732021-07-11 22:41:13 +01006085 ret = -ENOMEM;
6086fail:
6087 io_req_complete_failed(req, ret);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006088 return true;
6089 }
Jens Axboe31b51512019-01-18 22:56:34 -07006090
Jens Axboe79ebeae2021-08-10 15:18:27 -06006091 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006092 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06006093 spin_unlock(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006094 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03006095 io_queue_async_work(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006096 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006097 }
6098
6099 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006100 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006101 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006102 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006103 spin_unlock(&ctx->completion_lock);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006104 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006105}
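/*
 * Summary of the return value: false means the caller may issue the
 * request right away (no draining is active, or the sequence check
 * already passes with an empty defer list); true means io_drain_req()
 * consumed the request, by queueing it on ->defer_list, punting it
 * straight to async work, or failing it on a prep/allocation error.
 */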
6106
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006107static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006108{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006109 if (req->flags & REQ_F_BUFFER_SELECTED) {
6110 switch (req->opcode) {
6111 case IORING_OP_READV:
6112 case IORING_OP_READ_FIXED:
6113 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006114 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006115 break;
6116 case IORING_OP_RECVMSG:
6117 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006118 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006119 break;
6120 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006121 }
6122
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006123 if (req->flags & REQ_F_NEED_CLEANUP) {
6124 switch (req->opcode) {
6125 case IORING_OP_READV:
6126 case IORING_OP_READ_FIXED:
6127 case IORING_OP_READ:
6128 case IORING_OP_WRITEV:
6129 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006130 case IORING_OP_WRITE: {
6131 struct io_async_rw *io = req->async_data;
Pavel Begunkov1dacb4d2021-06-17 18:14:03 +01006132
6133 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006134 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006135 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006136 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006137 case IORING_OP_SENDMSG: {
6138 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006139
6140 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006141 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006142 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006143 case IORING_OP_SPLICE:
6144 case IORING_OP_TEE:
Pavel Begunkove1d767f2021-03-19 17:22:43 +00006145 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6146 io_put_file(req->splice.file_in);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006147 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006148 case IORING_OP_OPENAT:
6149 case IORING_OP_OPENAT2:
6150 if (req->open.filename)
6151 putname(req->open.filename);
6152 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006153 case IORING_OP_RENAMEAT:
6154 putname(req->rename.oldpath);
6155 putname(req->rename.newpath);
6156 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006157 case IORING_OP_UNLINKAT:
6158 putname(req->unlink.filename);
6159 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006160 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006161 }
Jens Axboe75652a302021-04-15 09:52:40 -06006162 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6163 kfree(req->apoll->double_poll);
6164 kfree(req->apoll);
6165 req->apoll = NULL;
6166 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006167 if (req->flags & REQ_F_INFLIGHT) {
6168 struct io_uring_task *tctx = req->task->io_uring;
6169
6170 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006171 }
Pavel Begunkovc8543572021-06-17 18:14:04 +01006172 if (req->flags & REQ_F_CREDS)
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006173 put_cred(req->creds);
Pavel Begunkovc8543572021-06-17 18:14:04 +01006174
6175 req->flags &= ~IO_REQ_CLEAN_FLAGS;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006176}
6177
Pavel Begunkov889fca72021-02-10 00:03:09 +00006178static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006179{
Jens Axboeedafcce2019-01-09 09:16:05 -07006180 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006181 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006182 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006183
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006184 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006185 creds = override_creds(req->creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006186
Jens Axboed625c6e2019-12-17 19:53:05 -07006187 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006188 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006189 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006190 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006191 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006192 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006193 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006194 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006195 break;
6196 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006197 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006198 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006199 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006200 break;
6201 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006202 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006203 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006204 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006205 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006206 break;
6207 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006208 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006209 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006210 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006211 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006212 break;
6213 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006214 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006215 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006216 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006217 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006218 break;
6219 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006220 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006221 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006222 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006223 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006224 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006225 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006226 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006227 break;
6228 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006229 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006230 break;
6231 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006232 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006233 break;
6234 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006235 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006236 break;
6237 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006238 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006239 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006240 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006241 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006242 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006243 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006244 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006245 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006246 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006247 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006248 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006249 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006250 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006251 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006252 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006253 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006254 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006255 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006256 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006257 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006258 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006259 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006260 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006261 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006262 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006263 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006264 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006265 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006266 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006267 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006268 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006269 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006270 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006271 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006272 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006273 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006274 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006275 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006276 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006277 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006278 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006279 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006280 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006281 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006282 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006283 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006284 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006285 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006286 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006287 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006288 default:
6289 ret = -EINVAL;
6290 break;
6291 }
Jens Axboe31b51512019-01-18 22:56:34 -07006292
Jens Axboe5730b272021-02-27 15:57:30 -07006293 if (creds)
6294 revert_creds(creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006295 if (ret)
6296 return ret;
Jens Axboeb5325762020-05-19 21:20:27 -06006297 /* If the op doesn't have a file, we're not polling for it */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01006298 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6299 io_iopoll_req_issued(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006300
6301 return 0;
6302}
6303
Pavel Begunkovebc11b62021-08-09 13:04:05 +01006304static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6305{
6306 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6307
6308 req = io_put_req_find_next(req);
6309 return req ? &req->work : NULL;
6310}
6311
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006312static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006313{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006314 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006315 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006316 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006317
Pavel Begunkov48dcd382021-08-15 10:40:18 +01006318 /* one will be dropped by ->io_free_work() after returning to io-wq */
6319 if (!(req->flags & REQ_F_REFCOUNT))
6320 __io_req_set_refcount(req, 2);
6321 else
6322 req_ref_get(req);
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006323
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006324 timeout = io_prep_linked_timeout(req);
6325 if (timeout)
6326 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006327
Jens Axboe4014d942021-01-19 15:53:54 -07006328 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006329 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006330
Jens Axboe561fb042019-10-24 07:25:42 -06006331 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006332 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006333 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006334 /*
6335 * We can get EAGAIN for polled IO even though we're
6336 * forcing a sync submission from here, since we can't
6337 * wait for request slots on the block side.
6338 */
6339 if (ret != -EAGAIN)
6340 break;
6341 cond_resched();
6342 } while (1);
6343 }
Jens Axboe31b51512019-01-18 22:56:34 -07006344
Pavel Begunkova3df76982021-02-18 22:32:52 +00006345 /* avoid locking problems by failing it from a clean context */
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006346 if (ret)
Pavel Begunkova3df76982021-02-18 22:32:52 +00006347 io_req_task_queue_fail(req, ret);
Jens Axboe31b51512019-01-18 22:56:34 -07006348}
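/*
 * Note on the retry loop above: the worker issues with issue_flags of
 * 0, so -EAGAIN can only come from polled IO that could not get a
 * request slot, as the in-loop comment explains; the worker just
 * yields via cond_resched() and tries again.  Errors are deliberately
 * not completed here but handed to io_req_task_queue_fail(), so the
 * failure is posted from task context rather than from the io-wq
 * worker.
 */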
Jens Axboe2b188cc2019-01-07 10:46:33 -07006349
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006350static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006351 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006352{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006353 return &table->files[i];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006354}
6355
Jens Axboe09bb8392019-03-13 12:39:28 -06006356static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6357 int index)
6358{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006359 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006360
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006361 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006362}
6363
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006364static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006365{
6366 unsigned long file_ptr = (unsigned long) file;
6367
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006368 if (__io_file_supports_nowait(file, READ))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006369 file_ptr |= FFS_ASYNC_READ;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006370 if (__io_file_supports_nowait(file, WRITE))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006371 file_ptr |= FFS_ASYNC_WRITE;
6372 if (S_ISREG(file_inode(file)->i_mode))
6373 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006374 file_slot->file_ptr = file_ptr;
Jens Axboe09bb8392019-03-13 12:39:28 -06006375}
6376
Pavel Begunkovac177052021-08-09 13:04:02 +01006377static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
6378 struct io_kiocb *req, int fd)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006379{
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006380 struct file *file;
Pavel Begunkovac177052021-08-09 13:04:02 +01006381 unsigned long file_ptr;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006382
Pavel Begunkovac177052021-08-09 13:04:02 +01006383 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6384 return NULL;
6385 fd = array_index_nospec(fd, ctx->nr_user_files);
6386 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6387 file = (struct file *) (file_ptr & FFS_MASK);
6388 file_ptr &= ~FFS_MASK;
6389 /* mask in overlapping REQ_F and FFS bits */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006390 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
Pavel Begunkovac177052021-08-09 13:04:02 +01006391 io_req_set_rsrc_node(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006392 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006393}
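/*
 * The FFS_* packing used here: io_fixed_file_set() stores per-file
 * properties (nowait-capable read/write, S_ISREG) in the low bits of
 * the file pointer, and io_file_get_fixed() recovers the pointer with
 * FFS_MASK while shifting the remaining bits straight into the
 * request's REQ_F_NOWAIT_* flags, avoiding a per-IO re-evaluation of
 * the file's capabilities on the fixed-file path.
 */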
6394
Pavel Begunkovac177052021-08-09 13:04:02 +01006395static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006396 struct io_kiocb *req, int fd)
6397{
Pavel Begunkov62906e82021-08-10 14:52:47 +01006398 struct file *file = fget(fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01006399
6400 trace_io_uring_file_get(ctx, fd);
6401
6402 /* we don't allow fixed io_uring files */
6403 if (file && unlikely(file->f_op == &io_uring_fops))
6404 io_req_track_inflight(req);
6405 return file;
6406}
6407
6408static inline struct file *io_file_get(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006409 struct io_kiocb *req, int fd, bool fixed)
6410{
6411 if (fixed)
6412 return io_file_get_fixed(ctx, req, fd);
6413 else
Pavel Begunkov62906e82021-08-10 14:52:47 +01006414 return io_file_get_normal(ctx, req, fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01006415}
6416
Jens Axboe89b263f2021-08-10 15:14:18 -06006417static void io_req_task_link_timeout(struct io_kiocb *req)
6418{
6419 struct io_kiocb *prev = req->timeout.prev;
6420 struct io_ring_ctx *ctx = req->ctx;
6421
6422 if (prev) {
6423 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
6424 io_put_req(prev);
6425 io_put_req(req);
6426 } else {
6427 io_req_complete_post(req, -ETIME, 0);
6428 }
6429}
6430
Jens Axboe2665abf2019-11-05 12:40:47 -07006431static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6432{
Jens Axboead8a48a2019-11-15 08:49:11 -07006433 struct io_timeout_data *data = container_of(timer,
6434 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006435 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006436 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006437 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006438
Jens Axboe89b263f2021-08-10 15:14:18 -06006439 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006440 prev = req->timeout.head;
6441 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006442
6443 /*
6444	 * We don't expect the list to be empty; that will only happen if we
6445 * race with the completion of the linked work.
6446 */
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006447 if (prev) {
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006448 io_remove_next_linked(prev);
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006449 if (!req_ref_inc_not_zero(prev))
6450 prev = NULL;
6451 }
Jens Axboe89b263f2021-08-10 15:14:18 -06006452 req->timeout.prev = prev;
6453 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Jens Axboe2665abf2019-11-05 12:40:47 -07006454
Jens Axboe89b263f2021-08-10 15:14:18 -06006455 req->io_task_work.func = io_req_task_link_timeout;
6456 io_req_task_work_add(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006457 return HRTIMER_NORESTART;
6458}
6459
Pavel Begunkovde968c12021-03-19 17:22:33 +00006460static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006461{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006462 struct io_ring_ctx *ctx = req->ctx;
6463
Jens Axboe89b263f2021-08-10 15:14:18 -06006464 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006465 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006466 * If the back reference is NULL, then our linked request finished
6467	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006468 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006469 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006470 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006471
Jens Axboead8a48a2019-11-15 08:49:11 -07006472 data->timer.function = io_link_timeout_fn;
6473 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6474 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006475 }
Jens Axboe89b263f2021-08-10 15:14:18 -06006476 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006477 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006478 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006479}
6480
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006481static void __io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006482 __must_hold(&req->ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006483{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006484 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboee0c5c572019-03-12 10:18:47 -06006485 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006486
Olivier Langlois59b735a2021-06-22 05:17:39 -07006487issue_sqe:
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006488 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006489
6490 /*
6491 * We async punt it if the file wasn't marked NOWAIT, or if the file
6492 * doesn't support non-blocking read/write attempts
6493 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006494 if (likely(!ret)) {
Pavel Begunkove342c802021-01-19 13:32:47 +00006495 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006496 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006497 struct io_submit_state *state = &ctx->submit_state;
Jens Axboee65ef562019-03-12 10:16:44 -06006498
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006499 state->compl_reqs[state->compl_nr++] = req;
6500 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01006501 io_submit_flush_completions(ctx);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006502 }
Pavel Begunkov18400382021-03-19 17:22:34 +00006503 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Olivier Langlois59b735a2021-06-22 05:17:39 -07006504 switch (io_arm_poll_handler(req)) {
6505 case IO_APOLL_READY:
6506 goto issue_sqe;
6507 case IO_APOLL_ABORTED:
Pavel Begunkov18400382021-03-19 17:22:34 +00006508 /*
6509			 * Queued up for async execution; the worker will release
6510 * submit reference when the iocb is actually submitted.
6511 */
6512 io_queue_async_work(req);
Olivier Langlois59b735a2021-06-22 05:17:39 -07006513 break;
Pavel Begunkov18400382021-03-19 17:22:34 +00006514 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006515 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006516 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006517 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006518 if (linked_timeout)
6519 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006520}
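/*
 * Paths out of __io_queue_sqe(): on success, inline completions are
 * batched in submit_state->compl_reqs and flushed once the batch
 * fills; on -EAGAIN without REQ_F_NOWAIT the request goes through
 * io_arm_poll_handler(), which may report it ready (retry the issue
 * immediately) or aborted (queue it to io-wq); any other error fails
 * the request.  A previously prepared linked timeout, if any, is armed
 * at the end on every path.
 */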
6521
Pavel Begunkov441b8a72021-06-14 23:37:31 +01006522static inline void io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006523 __must_hold(&req->ctx->uring_lock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006524{
Pavel Begunkov10c66902021-06-15 16:47:56 +01006525 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006526 return;
Jackie Liu4fe2c962019-09-09 20:50:40 +08006527
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006528 if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006529 __io_queue_sqe(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006530 } else {
6531 int ret = io_req_prep_async(req);
6532
6533 if (unlikely(ret))
6534 io_req_complete_failed(req, ret);
6535 else
6536 io_queue_async_work(req);
Jens Axboece35a472019-12-17 08:04:44 -07006537 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006538}
6539
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006540/*
6541 * Check SQE restrictions (opcode and flags).
6542 *
6543 * Returns 'true' if SQE is allowed, 'false' otherwise.
6544 */
6545static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6546 struct io_kiocb *req,
6547 unsigned int sqe_flags)
6548{
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01006549 if (likely(!ctx->restricted))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006550 return true;
6551
6552 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6553 return false;
6554
6555 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6556 ctx->restrictions.sqe_flags_required)
6557 return false;
6558
6559 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6560 ctx->restrictions.sqe_flags_required))
6561 return false;
6562
6563 return true;
6564}
6565
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006566static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006567 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006568 __must_hold(&ctx->uring_lock)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006569{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006570 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006571 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006572 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006573
Pavel Begunkov864ea922021-08-09 13:04:08 +01006574 /* req is partially pre-initialised, see io_preinit_req() */
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006575 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006576 /* same numerical values with corresponding REQ_F_*, safe to copy */
6577 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006578 req->user_data = READ_ONCE(sqe->user_data);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006579 req->file = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006580 req->fixed_rsrc_refs = NULL;
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006581 req->task = current;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006582
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006583 /* enforce forwards compatibility on users */
Pavel Begunkovdddca222021-04-27 16:13:52 +01006584 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006585 return -EINVAL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006586 if (unlikely(req->opcode >= IORING_OP_LAST))
6587 return -EINVAL;
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01006588 if (!io_check_restriction(ctx, req, sqe_flags))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006589 return -EACCES;
6590
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006591 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6592 !io_op_defs[req->opcode].buffer_select)
6593 return -EOPNOTSUPP;
Pavel Begunkov3c199662021-06-15 16:47:57 +01006594 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
6595 ctx->drain_active = true;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006596
Jens Axboe003e8dc2021-03-06 09:22:27 -07006597 personality = READ_ONCE(sqe->personality);
6598 if (personality) {
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006599 req->creds = xa_load(&ctx->personalities, personality);
6600 if (!req->creds)
Jens Axboe003e8dc2021-03-06 09:22:27 -07006601 return -EINVAL;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006602 get_cred(req->creds);
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006603 req->flags |= REQ_F_CREDS;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006604 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006605 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006606
Jens Axboe27926b62020-10-28 09:33:23 -06006607 /*
6608 * Plug now if we have more than 1 IO left after this, and the target
6609	 * is potentially a read/write to block-based storage.
6610 */
6611 if (!state->plug_started && state->ios_left > 1 &&
6612 io_op_defs[req->opcode].plug) {
6613 blk_start_plug(&state->plug);
6614 state->plug_started = true;
6615 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006616
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006617 if (io_op_defs[req->opcode].needs_file) {
Pavel Begunkov62906e82021-08-10 14:52:47 +01006618 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
Pavel Begunkovac177052021-08-09 13:04:02 +01006619 (sqe_flags & IOSQE_FIXED_FILE));
Pavel Begunkovba13e232021-02-01 18:59:52 +00006620 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006621 ret = -EBADF;
6622 }
6623
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006624 state->ios_left--;
6625 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006626}
6627
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006628static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006629 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006630 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006631{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006632 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006633 int ret;
6634
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006635 ret = io_init_req(ctx, req, sqe);
6636 if (unlikely(ret)) {
6637fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006638 if (link->head) {
6639 /* fail even hard links since we don't submit */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006640 req_set_fail(link->head);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006641 io_req_complete_failed(link->head, -ECANCELED);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006642 link->head = NULL;
6643 }
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006644 io_req_complete_failed(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006645 return ret;
6646 }
Pavel Begunkov441b8a72021-06-14 23:37:31 +01006647
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006648 ret = io_req_prep(req, sqe);
6649 if (unlikely(ret))
6650 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006651
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006652 /* don't need @sqe from now on */
Olivier Langlois236daeae2021-05-31 02:36:37 -04006653 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
6654 req->flags, true,
6655 ctx->flags & IORING_SETUP_SQPOLL);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006656
Jens Axboe6c271ce2019-01-10 11:22:30 -07006657 /*
6658 * If we already have a head request, queue this one for async
6659 * submittal once the head completes. If we don't have a head but
6660 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6661 * submitted sync once the chain is complete. If none of those
6662 * conditions are true (normal request), then just queue it.
6663 */
6664 if (link->head) {
6665 struct io_kiocb *head = link->head;
6666
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006667 ret = io_req_prep_async(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006668 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006669 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006670 trace_io_uring_link(ctx, req, head);
6671 link->last->link = req;
6672 link->last = req;
6673
6674 /* last request of a link, enqueue the link */
6675 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
6676 link->head = NULL;
Pavel Begunkov5e159202021-06-14 23:37:26 +01006677 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006678 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006679 } else {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006680 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006681 link->head = req;
6682 link->last = req;
6683 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006684 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006685 }
6686 }
6687
6688 return 0;
6689}
6690
6691/*
6692	 * Batched submission is done; ensure local IO is flushed out.
6693 */
6694static void io_submit_state_end(struct io_submit_state *state,
6695 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006696{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006697 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006698 io_queue_sqe(state->link.head);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006699 if (state->compl_nr)
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01006700 io_submit_flush_completions(ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006701 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006702 blk_finish_plug(&state->plug);
Jens Axboe9e645e112019-05-10 16:07:28 -06006703}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006704
Jens Axboe9e645e112019-05-10 16:07:28 -06006705/*
6706 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006707 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006708static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006709 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006710{
6711 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006712 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006713 /* set only head, no need to init link_last in advance */
6714 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006715}
6716
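/*
 * Publish the kernel's cached SQ head to the shared ring, freeing the
 * consumed entries for reuse by the application.
 */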
Jens Axboe193155c2020-02-22 23:22:19 -07006717static void io_commit_sqring(struct io_ring_ctx *ctx)
6718{
Jens Axboe75c6a032020-01-28 10:15:23 -07006719 struct io_rings *rings = ctx->rings;
6720
6721 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006722 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006723 * since once we write the new head, the application could
6724 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006725 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006726 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006727}
6728
Jens Axboe9e645e112019-05-10 16:07:28 -06006729/*
Fam Zhengdd9ae8a2021-06-04 17:42:56 +01006730 * Fetch an sqe, if one is available. Note this returns a pointer to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006731 * that is mapped by userspace. This means that care needs to be taken to
6732 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006733 * being a good citizen. If members of the sqe are validated and then later
6734 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006735 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006736 */
6737static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006738{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01006739 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006740 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06006741
6742 /*
6743 * The cached sq head (or cq tail) serves two purposes:
6744 *
6745	 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006746	 *    head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006747 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006748 * though the application is the one updating it.
6749 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006750 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006751 if (likely(head < ctx->sq_entries))
6752 return &ctx->sq_sqes[head];
6753
6754 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01006755 ctx->cq_extra--;
6756 WRITE_ONCE(ctx->rings->sq_dropped,
6757 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03006758 return NULL;
6759}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006760
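/*
 * Submit up to @nr SQEs from the SQ ring. Returns the number of requests
 * handed off (a request counts as submitted once it will complete through
 * the normal paths), or -EAGAIN if nothing could be started at all.
 */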
Jens Axboe0f212202020-09-13 13:09:39 -06006761static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006762 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006763{
Pavel Begunkov09899b12021-06-14 02:36:22 +01006764 struct io_uring_task *tctx;
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006765 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006766
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006767 /* make sure SQ entry isn't read before tail */
6768 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006769 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6770 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006771
Pavel Begunkov09899b12021-06-14 02:36:22 +01006772 tctx = current->io_uring;
6773 tctx->cached_refs -= nr;
6774 if (unlikely(tctx->cached_refs < 0)) {
6775 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
6776
6777 percpu_counter_add(&tctx->inflight, refill);
6778 refcount_add(refill, &current->usage);
6779 tctx->cached_refs += refill;
6780 }
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006781 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006782
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006783 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006784 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006785 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006786
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006787 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006788 if (unlikely(!req)) {
6789 if (!submitted)
6790 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006791 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006792 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006793 sqe = io_get_sqe(ctx);
6794 if (unlikely(!sqe)) {
6795 kmem_cache_free(req_cachep, req);
6796 break;
6797 }
Jens Axboed3656342019-12-18 09:50:26 -07006798 /* will complete beyond this point, count as submitted */
6799 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006800 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006801 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006802 }
6803
Pavel Begunkov9466f432020-01-25 22:34:01 +03006804 if (unlikely(submitted != nr)) {
6805 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006806 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006807
Pavel Begunkov09899b12021-06-14 02:36:22 +01006808 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06006809 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006810 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006811
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006812 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006813 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6814 io_commit_sqring(ctx);
6815
Jens Axboe6c271ce2019-01-10 11:22:30 -07006816 return submitted;
6817}
6818
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006819static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
6820{
6821 return READ_ONCE(sqd->state);
6822}
6823
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006824static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6825{
6826 /* Tell userspace we may need a wakeup call */
Jens Axboe79ebeae2021-08-10 15:18:27 -06006827 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07006828 WRITE_ONCE(ctx->rings->sq_flags,
6829 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006830 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006831}
6832
6833static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6834{
Jens Axboe79ebeae2021-08-10 15:18:27 -06006835 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07006836 WRITE_ONCE(ctx->rings->sq_flags,
6837 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006838 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006839}
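
/*
 * Userspace pairs with the flag updates above when submitting to an
 * SQPOLL ring: after publishing new SQ entries, it checks the flag and
 * kicks the thread if needed. A rough sketch, with liburing-style field
 * names used purely for illustration and the required memory ordering
 * omitted:
 *
 *	if (READ_ONCE(*sq->kflags) & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */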
6840
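/*
 * One SQPOLL pass over a single ctx: reap pending iopoll completions and
 * submit new SQEs (capped for fairness when this thread drives multiple
 * rings), under the ctx's uring_lock and the ring's sq_creds if those
 * differ from the thread's.
 */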
Xiaoguang Wang08369242020-11-03 14:15:59 +08006841static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006842{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006843 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006844 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006845
Jens Axboec8d1ba52020-09-14 11:07:26 -06006846 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006847 /* if we're handling multiple rings, cap submit size for fairness */
Olivier Langlois4ce8ad92021-06-23 11:50:18 -07006848 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
6849 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
Jens Axboee95eee22020-09-08 09:11:32 -06006850
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006851 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6852 unsigned nr_events = 0;
Pavel Begunkov948e1942021-06-24 15:09:55 +01006853 const struct cred *creds = NULL;
6854
6855 if (ctx->sq_creds != current_cred())
6856 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006857
Xiaoguang Wang08369242020-11-03 14:15:59 +08006858 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006859 if (!list_empty(&ctx->iopoll_list))
Pavel Begunkova8576af2021-08-15 10:40:21 +01006860 io_do_iopoll(ctx, &nr_events, 0);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006861
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01006862 /*
6863 * Don't submit if refs are dying, good for io_uring_register(),
6864 * but also it is relied upon by io_ring_exit_work()
6865 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006866 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6867 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006868 ret = io_submit_sqes(ctx, to_submit);
6869 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06006870
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006871 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
6872 wake_up(&ctx->sqo_sq_wait);
Pavel Begunkov948e1942021-06-24 15:09:55 +01006873 if (creds)
6874 revert_creds(creds);
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006875 }
Jens Axboe90554202020-09-03 12:12:41 -06006876
Xiaoguang Wang08369242020-11-03 14:15:59 +08006877 return ret;
6878}
6879
6880static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6881{
6882 struct io_ring_ctx *ctx;
6883 unsigned sq_thread_idle = 0;
6884
Pavel Begunkovc9dca272021-03-10 13:13:55 +00006885 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6886 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006887 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006888}
6889
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006890static bool io_sqd_handle_event(struct io_sq_data *sqd)
6891{
6892 bool did_sig = false;
6893 struct ksignal ksig;
6894
6895 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6896 signal_pending(current)) {
6897 mutex_unlock(&sqd->lock);
6898 if (signal_pending(current))
6899 did_sig = get_signal(&ksig);
6900 cond_resched();
6901 mutex_lock(&sqd->lock);
6902 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006903 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6904}
6905
Jens Axboe6c271ce2019-01-10 11:22:30 -07006906static int io_sq_thread(void *data)
6907{
Jens Axboe69fb2132020-09-14 11:16:23 -06006908 struct io_sq_data *sqd = data;
6909 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006910 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006911 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006912 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006913
Pavel Begunkov696ee882021-04-01 09:55:04 +01006914 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006915 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06006916
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006917 if (sqd->sq_cpu != -1)
6918 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6919 else
6920 set_cpus_allowed_ptr(current, cpu_online_mask);
6921 current->flags |= PF_NO_SETAFFINITY;
6922
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006923 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006924 while (1) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01006925 bool cap_entries, sqt_spin = false;
Jens Axboec1edbf52019-11-10 16:56:04 -07006926
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006927 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
6928 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01006929 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006930 timeout = jiffies + sqd->sq_thread_idle;
6931 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006932
Jens Axboee95eee22020-09-08 09:11:32 -06006933 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006934 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkov948e1942021-06-24 15:09:55 +01006935 int ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006936
Xiaoguang Wang08369242020-11-03 14:15:59 +08006937 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6938 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006939 }
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01006940 if (io_run_task_work())
6941 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006942
Xiaoguang Wang08369242020-11-03 14:15:59 +08006943 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006944 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006945 if (sqt_spin)
6946 timeout = jiffies + sqd->sq_thread_idle;
6947 continue;
6948 }
6949
Xiaoguang Wang08369242020-11-03 14:15:59 +08006950 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01006951 if (!io_sqd_events_pending(sqd) && !current->task_works) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01006952 bool needs_sched = true;
6953
Hao Xu724cb4f2021-04-21 23:19:11 +08006954 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01006955 io_ring_set_wakeup_flag(ctx);
6956
Hao Xu724cb4f2021-04-21 23:19:11 +08006957 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6958 !list_empty_careful(&ctx->iopoll_list)) {
6959 needs_sched = false;
6960 break;
6961 }
6962 if (io_sqring_entries(ctx)) {
6963 needs_sched = false;
6964 break;
6965 }
6966 }
6967
6968 if (needs_sched) {
6969 mutex_unlock(&sqd->lock);
6970 schedule();
6971 mutex_lock(&sqd->lock);
6972 }
Jens Axboe69fb2132020-09-14 11:16:23 -06006973 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6974 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006975 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006976
6977 finish_wait(&sqd->wait, &wait);
6978 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006979 }
6980
Pavel Begunkov78cc6872021-06-14 02:36:23 +01006981 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006982 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006983 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006984 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006985 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01006986 mutex_unlock(&sqd->lock);
6987
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006988 complete(&sqd->exited);
6989 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006990}
6991
Jens Axboebda52162019-09-24 13:47:15 -06006992struct io_wait_queue {
6993 struct wait_queue_entry wq;
6994 struct io_ring_ctx *ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06006995 unsigned cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06006996 unsigned nr_timeouts;
6997};
6998
Pavel Begunkov6c503152021-01-04 20:36:36 +00006999static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06007000{
7001 struct io_ring_ctx *ctx = iowq->ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007002 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007003
7004 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007005 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007006 * started waiting. For timeouts, we always want to return to userspace,
7007 * regardless of event count.
7008 */
Jens Axboe5fd46172021-08-06 14:04:31 -06007009 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
Jens Axboebda52162019-09-24 13:47:15 -06007010}
7011
7012static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7013 int wake_flags, void *key)
7014{
7015 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7016 wq);
7017
Pavel Begunkov6c503152021-01-04 20:36:36 +00007018 /*
7019	 * Cannot safely flush overflowed CQEs from here; ensure we wake up
7020 * the task, and the next invocation will do it.
7021 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007022 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
Pavel Begunkov6c503152021-01-04 20:36:36 +00007023 return autoremove_wake_function(curr, mode, wake_flags, key);
7024 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06007025}
7026
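/*
 * Run any pending task_work. Returns 1 if work was run, 0 if there was
 * none and no signal is pending, -ERESTARTSYS for a TIF_NOTIFY_SIGNAL
 * wakeup, and -EINTR for a real signal.
 */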
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007027static int io_run_task_work_sig(void)
7028{
7029 if (io_run_task_work())
7030 return 1;
7031 if (!signal_pending(current))
7032 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06007033 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06007034 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007035 return -EINTR;
7036}
7037
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007038/* when returns >0, the caller should retry */
7039static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7040 struct io_wait_queue *iowq,
7041 signed long *timeout)
7042{
7043 int ret;
7044
7045 /* make sure we run task_work before checking for signals */
7046 ret = io_run_task_work_sig();
7047 if (ret || io_should_wake(iowq))
7048 return ret;
7049 /* let the caller flush overflows, retry */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007050 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007051 return 1;
7052
7053 *timeout = schedule_timeout(*timeout);
7054 return !*timeout ? -ETIME : 1;
7055}
7056
Jens Axboe2b188cc2019-01-07 10:46:33 -07007057/*
7058 * Wait until events become available, if we don't already have some. The
7059 * application must reap them itself, as they reside on the shared cq ring.
7060 */
7061static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007062 const sigset_t __user *sig, size_t sigsz,
7063 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007064{
Pavel Begunkov902910992021-08-09 09:07:32 -06007065 struct io_wait_queue iowq;
Hristo Venev75b28af2019-08-26 17:23:46 +00007066 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007067 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7068 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007069
Jens Axboeb41e9852020-02-17 09:52:41 -07007070 do {
Pavel Begunkov90f67362021-08-09 20:18:12 +01007071 io_cqring_overflow_flush(ctx);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007072 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007073 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007074 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007075 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007076 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007077
7078 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007079#ifdef CONFIG_COMPAT
7080 if (in_compat_syscall())
7081 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007082 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007083 else
7084#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007085 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007086
Jens Axboe2b188cc2019-01-07 10:46:33 -07007087 if (ret)
7088 return ret;
7089 }
7090
Hao Xuc73ebb62020-11-03 10:54:37 +08007091 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007092 struct timespec64 ts;
7093
Hao Xuc73ebb62020-11-03 10:54:37 +08007094 if (get_timespec64(&ts, uts))
7095 return -EFAULT;
7096 timeout = timespec64_to_jiffies(&ts);
7097 }
7098
Pavel Begunkov902910992021-08-09 09:07:32 -06007099 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7100 iowq.wq.private = current;
7101 INIT_LIST_HEAD(&iowq.wq.entry);
7102 iowq.ctx = ctx;
Jens Axboebda52162019-09-24 13:47:15 -06007103 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Jens Axboe5fd46172021-08-06 14:04:31 -06007104 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
Pavel Begunkov902910992021-08-09 09:07:32 -06007105
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007106 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007107 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007108 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov90f67362021-08-09 20:18:12 +01007109 if (!io_cqring_overflow_flush(ctx)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007110 ret = -EBUSY;
7111 break;
7112 }
Pavel Begunkov311997b2021-06-14 23:37:28 +01007113 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
Jens Axboebda52162019-09-24 13:47:15 -06007114 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007115 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
Pavel Begunkov311997b2021-06-14 23:37:28 +01007116 finish_wait(&ctx->cq_wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007117 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007118 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007119
Jens Axboeb7db41c2020-07-04 08:55:50 -06007120 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007121
Hristo Venev75b28af2019-08-26 17:23:46 +00007122 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007123}
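
/*
 * The userspace-facing counterpart of the wait above is io_uring_enter()
 * with IORING_ENTER_GETEVENTS. A minimal raw-syscall sketch (error
 * handling elided; not a substitute for liburing):
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, 0, min_events,
 *		      IORING_ENTER_GETEVENTS, NULL, 0);
 */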
7124
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007125static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007126{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007127 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007128
7129 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007130 kfree(table[i]);
7131 kfree(table);
7132}
7133
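/*
 * Allocate @size bytes as an array of PAGE_SIZE chunks rather than one
 * large allocation. For example, assuming 4K pages, an 80000-byte tag
 * table (10000 u64 tags) becomes 20 chunks: 19 full pages plus a
 * 2176-byte tail.
 */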
7134static void **io_alloc_page_table(size_t size)
7135{
7136 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7137 size_t init_size = size;
7138 void **table;
7139
7140 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
7141 if (!table)
7142 return NULL;
7143
7144 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov27f6b312021-06-15 13:20:13 +01007145 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007146
7147 table[i] = kzalloc(this_size, GFP_KERNEL);
7148 if (!table[i]) {
7149 io_free_page_table(table, init_size);
7150 return NULL;
7151 }
7152 size -= this_size;
7153 }
7154 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007155}
7156
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007157static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7158{
7159 percpu_ref_exit(&ref_node->refs);
7160 kfree(ref_node);
7161}
7162
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007163static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7164{
7165 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7166 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
7167 unsigned long flags;
7168 bool first_add = false;
7169
7170 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
7171 node->done = true;
7172
7173 while (!list_empty(&ctx->rsrc_ref_list)) {
7174 node = list_first_entry(&ctx->rsrc_ref_list,
7175 struct io_rsrc_node, node);
7176 /* recycle ref nodes in order */
7177 if (!node->done)
7178 break;
7179 list_del(&node->node);
7180 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
7181 }
7182 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
7183
7184 if (first_add)
7185 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
7186}
7187
7188static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
7189{
7190 struct io_rsrc_node *ref_node;
7191
7192 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7193 if (!ref_node)
7194 return NULL;
7195
7196 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7197 0, GFP_KERNEL)) {
7198 kfree(ref_node);
7199 return NULL;
7200 }
7201 INIT_LIST_HEAD(&ref_node->node);
7202 INIT_LIST_HEAD(&ref_node->rsrc_list);
7203 ref_node->done = false;
7204 return ref_node;
7205}
7206
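/*
 * Install a fresh rsrc node (prepared by io_rsrc_node_switch_start())
 * and, if @data_to_kill is given, arm the old node against it: the node
 * takes a ref on the data and has its percpu ref killed, so once all
 * users drop off, io_rsrc_node_ref_zero() queues the node's resources
 * for putting.
 */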
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007207static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7208 struct io_rsrc_data *data_to_kill)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007209{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007210 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7211 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007212
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007213 if (data_to_kill) {
7214 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007215
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007216 rsrc_node->rsrc_data = data_to_kill;
Jens Axboe4956b9e2021-08-09 07:49:41 -06007217 spin_lock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007218 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
Jens Axboe4956b9e2021-08-09 07:49:41 -06007219 spin_unlock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007220
Pavel Begunkov3e942492021-04-11 01:46:34 +01007221 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007222 percpu_ref_kill(&rsrc_node->refs);
7223 ctx->rsrc_node = NULL;
7224 }
7225
7226 if (!ctx->rsrc_node) {
7227 ctx->rsrc_node = ctx->rsrc_backup_node;
7228 ctx->rsrc_backup_node = NULL;
7229 }
Pavel Begunkov1642b442020-12-30 21:34:14 +00007230}
7231
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007232static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007233{
7234 if (ctx->rsrc_backup_node)
7235 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007236 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007237 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7238}
7239
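/*
 * Quiesce a resource table before unregistering it: switch away from the
 * current node, drop the data's initial ref, and wait for ->done,
 * releasing ->uring_lock around the sleep so rsrc put work can make
 * progress. Returns 0 on success, -ENXIO if a quiesce is already in
 * flight, or a signal-related error.
 */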
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007240static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007241{
7242 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007243
Pavel Begunkov215c3902021-04-01 15:43:48 +01007244	/* As we may drop ->uring_lock, another task may have started a quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007245 if (data->quiesce)
7246 return -ENXIO;
7247
7248 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007249 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007250 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007251 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007252 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007253 io_rsrc_node_switch(ctx, data);
7254
Pavel Begunkov3e942492021-04-11 01:46:34 +01007255 /* kill initial ref, already quiesced if zero */
7256 if (atomic_dec_and_test(&data->refs))
7257 break;
Jens Axboec018db42021-08-09 08:15:50 -06007258 mutex_unlock(&ctx->uring_lock);
Hao Xu8bad28d2021-02-19 17:19:36 +08007259 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007260 ret = wait_for_completion_interruptible(&data->done);
Jens Axboec018db42021-08-09 08:15:50 -06007261 if (!ret) {
7262 mutex_lock(&ctx->uring_lock);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007263 break;
Jens Axboec018db42021-08-09 08:15:50 -06007264 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007265
Pavel Begunkov3e942492021-04-11 01:46:34 +01007266 atomic_inc(&data->refs);
7267	/* wait for all work items potentially completing data->done */
7268 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007269 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007270
Hao Xu8bad28d2021-02-19 17:19:36 +08007271 ret = io_run_task_work_sig();
7272 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007273 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007274 data->quiesce = false;
7275
Hao Xu8bad28d2021-02-19 17:19:36 +08007276 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007277}
7278
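/*
 * Tags sit in a two-level table built by io_alloc_page_table(): the high
 * bits of @idx select the page, the low bits the slot within it.
 * Assuming 4K pages (512 u64 tags per page, i.e. a shift of 9), idx 1000
 * resolves to table 1, slot 488.
 */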
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007279static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7280{
7281 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7282 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7283
7284 return &data->tags[table_idx][off];
7285}
7286
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007287static void io_rsrc_data_free(struct io_rsrc_data *data)
7288{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007289 size_t size = data->nr * sizeof(data->tags[0][0]);
7290
7291 if (data->tags)
7292 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007293 kfree(data);
7294}
7295
Pavel Begunkovd878c812021-06-14 02:36:18 +01007296static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7297 u64 __user *utags, unsigned nr,
7298 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007299{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007300 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007301 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007302 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007303
7304 data = kzalloc(sizeof(*data), GFP_KERNEL);
7305 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007306 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007307 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007308 if (!data->tags) {
7309 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007310 return -ENOMEM;
7311 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007312
7313 data->nr = nr;
7314 data->ctx = ctx;
7315 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007316 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007317 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007318 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01007319 u64 *tag_slot = io_get_tag_slot(data, i);
7320
7321 if (copy_from_user(tag_slot, &utags[i],
7322 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007323 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007324 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007325 }
7326
Pavel Begunkov3e942492021-04-11 01:46:34 +01007327 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007328 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007329 *pdata = data;
7330 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007331fail:
7332 io_rsrc_data_free(data);
7333 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007334}
7335
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007336static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7337{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007338 table->files = kvcalloc(nr_files, sizeof(table->files[0]), GFP_KERNEL);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007339 return !!table->files;
7340}
7341
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007342static void io_free_file_tables(struct io_file_table *table)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007343{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007344 kvfree(table->files);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007345 table->files = NULL;
7346}
7347
Jens Axboe2b188cc2019-01-07 10:46:33 -07007348static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7349{
7350#if defined(CONFIG_UNIX)
7351 if (ctx->ring_sock) {
7352 struct sock *sock = ctx->ring_sock->sk;
7353 struct sk_buff *skb;
7354
7355 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7356 kfree_skb(skb);
7357 }
7358#else
7359 int i;
7360
7361 for (i = 0; i < ctx->nr_user_files; i++) {
7362 struct file *file;
7363
7364 file = io_file_from_index(ctx, i);
7365 if (file)
7366 fput(file);
7367 }
7368#endif
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007369 io_free_file_tables(&ctx->file_table);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007370 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007371 ctx->file_data = NULL;
7372 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007373}
7374
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007375static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7376{
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007377 int ret;
7378
Pavel Begunkov08480402021-04-13 02:58:38 +01007379 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007380 return -ENXIO;
Pavel Begunkov08480402021-04-13 02:58:38 +01007381 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7382 if (!ret)
7383 __io_sqe_files_unregister(ctx);
7384 return ret;
Jens Axboe6b063142019-01-10 22:13:58 -07007385}
7386
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007387static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007388 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007389{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007390 WARN_ON_ONCE(sqd->thread == current);
7391
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007392 /*
7393	 * Do the dance, but not a conditional clear_bit(): it'd race with
7394 * other threads incrementing park_pending and setting the bit.
7395 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007396 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007397 if (atomic_dec_return(&sqd->park_pending))
7398 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007399 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007400}
7401
Jens Axboe86e0d672021-03-05 08:44:39 -07007402static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007403 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007404{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007405 WARN_ON_ONCE(sqd->thread == current);
7406
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007407 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007408 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007409 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007410 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007411 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007412}
7413
7414static void io_sq_thread_stop(struct io_sq_data *sqd)
7415{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007416 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007417 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007418
Jens Axboe05962f92021-03-06 13:58:48 -07007419 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007420 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07007421 if (sqd->thread)
7422 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007423 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007424 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007425}
7426
Jens Axboe534ca6d2020-09-02 13:52:19 -06007427static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007428{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007429 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007430 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7431
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007432 io_sq_thread_stop(sqd);
7433 kfree(sqd);
7434 }
7435}
7436
7437static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7438{
7439 struct io_sq_data *sqd = ctx->sq_data;
7440
7441 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007442 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007443 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007444 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007445 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007446
7447 io_put_sq_data(sqd);
7448 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007449 }
7450}
7451
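/*
 * For IORING_SETUP_ATTACH_WQ: look up the io_sq_data behind an existing
 * io_uring fd and take a reference, letting several rings share one
 * SQPOLL thread. Restricted to the same thread group, hence the -EPERM
 * check on task_tgid.
 */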
Jens Axboeaa061652020-09-02 14:50:27 -06007452static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7453{
7454 struct io_ring_ctx *ctx_attach;
7455 struct io_sq_data *sqd;
7456 struct fd f;
7457
7458 f = fdget(p->wq_fd);
7459 if (!f.file)
7460 return ERR_PTR(-ENXIO);
7461 if (f.file->f_op != &io_uring_fops) {
7462 fdput(f);
7463 return ERR_PTR(-EINVAL);
7464 }
7465
7466 ctx_attach = f.file->private_data;
7467 sqd = ctx_attach->sq_data;
7468 if (!sqd) {
7469 fdput(f);
7470 return ERR_PTR(-EINVAL);
7471 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007472 if (sqd->task_tgid != current->tgid) {
7473 fdput(f);
7474 return ERR_PTR(-EPERM);
7475 }
Jens Axboeaa061652020-09-02 14:50:27 -06007476
7477 refcount_inc(&sqd->refs);
7478 fdput(f);
7479 return sqd;
7480}
7481
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007482static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7483 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007484{
7485 struct io_sq_data *sqd;
7486
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007487 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007488 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7489 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007490 if (!IS_ERR(sqd)) {
7491 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007492 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007493 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007494		/* fall through for EPERM case, set up new sqd/task */
7495 if (PTR_ERR(sqd) != -EPERM)
7496 return sqd;
7497 }
Jens Axboeaa061652020-09-02 14:50:27 -06007498
Jens Axboe534ca6d2020-09-02 13:52:19 -06007499 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7500 if (!sqd)
7501 return ERR_PTR(-ENOMEM);
7502
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007503 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007504 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007505 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007506 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007507 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007508 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007509 return sqd;
7510}
7511
Jens Axboe6b063142019-01-10 22:13:58 -07007512#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007513/*
7514 * Ensure the UNIX gc is aware of our file set, so we are certain that
7515 * the io_uring can be safely unregistered on process exit, even if we have
7516 * loops in the file referencing.
7517 */
7518static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7519{
7520 struct sock *sk = ctx->ring_sock->sk;
7521 struct scm_fp_list *fpl;
7522 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007523 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007524
Jens Axboe6b063142019-01-10 22:13:58 -07007525 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7526 if (!fpl)
7527 return -ENOMEM;
7528
7529 skb = alloc_skb(0, GFP_KERNEL);
7530 if (!skb) {
7531 kfree(fpl);
7532 return -ENOMEM;
7533 }
7534
7535 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007536
Jens Axboe08a45172019-10-03 08:11:03 -06007537 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007538 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007539 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007540 struct file *file = io_file_from_index(ctx, i + offset);
7541
7542 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007543 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007544 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007545 unix_inflight(fpl->user, fpl->fp[nr_files]);
7546 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007547 }
7548
Jens Axboe08a45172019-10-03 08:11:03 -06007549 if (nr_files) {
7550 fpl->max = SCM_MAX_FD;
7551 fpl->count = nr_files;
7552 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007553 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007554 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7555 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007556
Jens Axboe08a45172019-10-03 08:11:03 -06007557 for (i = 0; i < nr_files; i++)
7558 fput(fpl->fp[i]);
7559 } else {
7560 kfree_skb(skb);
7561 kfree(fpl);
7562 }
Jens Axboe6b063142019-01-10 22:13:58 -07007563
7564 return 0;
7565}
7566
7567/*
7568 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7569 * causes regular reference counting to break down. We rely on the UNIX
7570 * garbage collection to take care of this problem for us.
7571 */
7572static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7573{
7574 unsigned left, total;
7575 int ret = 0;
7576
7577 total = 0;
7578 left = ctx->nr_user_files;
7579 while (left) {
7580 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007581
7582 ret = __io_sqe_files_scm(ctx, this_files, total);
7583 if (ret)
7584 break;
7585 left -= this_files;
7586 total += this_files;
7587 }
7588
7589 if (!ret)
7590 return 0;
7591
7592 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007593 struct file *file = io_file_from_index(ctx, total);
7594
7595 if (file)
7596 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007597 total++;
7598 }
7599
7600 return ret;
7601}
7602#else
7603static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7604{
7605 return 0;
7606}
7607#endif
7608
Pavel Begunkov47e90392021-04-01 15:43:56 +01007609static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007610{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007611 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007612#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007613 struct sock *sock = ctx->ring_sock->sk;
7614 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7615 struct sk_buff *skb;
7616 int i;
7617
7618 __skb_queue_head_init(&list);
7619
7620 /*
7621 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7622 * remove this entry and rearrange the file array.
7623 */
7624 skb = skb_dequeue(head);
7625 while (skb) {
7626 struct scm_fp_list *fp;
7627
7628 fp = UNIXCB(skb).fp;
7629 for (i = 0; i < fp->count; i++) {
7630 int left;
7631
7632 if (fp->fp[i] != file)
7633 continue;
7634
7635 unix_notinflight(fp->user, fp->fp[i]);
7636 left = fp->count - 1 - i;
7637 if (left) {
7638 memmove(&fp->fp[i], &fp->fp[i + 1],
7639 left * sizeof(struct file *));
7640 }
7641 fp->count--;
7642 if (!fp->count) {
7643 kfree_skb(skb);
7644 skb = NULL;
7645 } else {
7646 __skb_queue_tail(&list, skb);
7647 }
7648 fput(file);
7649 file = NULL;
7650 break;
7651 }
7652
7653 if (!file)
7654 break;
7655
7656 __skb_queue_tail(&list, skb);
7657
7658 skb = skb_dequeue(head);
7659 }
7660
7661 if (skb_peek(&list)) {
7662 spin_lock_irq(&head->lock);
7663 while ((skb = __skb_dequeue(&list)) != NULL)
7664 __skb_queue_tail(head, skb);
7665 spin_unlock_irq(&head->lock);
7666 }
7667#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007668 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007669#endif
7670}
7671
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007672static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007673{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007674 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007675 struct io_ring_ctx *ctx = rsrc_data->ctx;
7676 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007677
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007678 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7679 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007680
7681 if (prsrc->tag) {
7682 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007683
7684 io_ring_submit_lock(ctx, lock_ring);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007685 spin_lock(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007686 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
Pavel Begunkov2840f712021-04-27 16:13:51 +01007687 ctx->cq_extra++;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007688 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007689 spin_unlock(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007690 io_cqring_ev_posted(ctx);
7691 io_ring_submit_unlock(ctx, lock_ring);
7692 }
7693
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007694 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007695 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007696 }
7697
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007698 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007699 if (atomic_dec_and_test(&rsrc_data->refs))
7700 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007701}
7702
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007703static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007704{
7705 struct io_ring_ctx *ctx;
7706 struct llist_node *node;
7707
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007708 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7709 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007710
7711 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007712 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007713 struct llist_node *next = node->next;
7714
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007715 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007716 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007717 node = next;
7718 }
7719}
7720
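/*
 * Register an array of user fds as the ring's fixed file set. An fd of
 * -1 leaves a sparse slot (which must then not carry a tag), and io_uring
 * fds themselves are refused: without CONFIG_UNIX they'd form an
 * unbreakable reference cycle, and they don't support regular read/write
 * anyway.
 */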
Jens Axboe05f3fb32019-12-09 11:22:50 -07007721static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01007722 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007723{
7724 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007725 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007726 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007727 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007728
7729 if (ctx->file_data)
7730 return -EBUSY;
7731 if (!nr_args)
7732 return -EINVAL;
7733 if (nr_args > IORING_MAX_FIXED_FILES)
7734 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007735 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007736 if (ret)
7737 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007738 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
7739 &ctx->file_data);
7740 if (ret)
7741 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007742
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007743 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007744 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007745 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007746
Jens Axboe05f3fb32019-12-09 11:22:50 -07007747 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01007748 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007749 ret = -EFAULT;
7750 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007751 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007752 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01007753 if (fd == -1) {
7754 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007755 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01007756 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007757 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007758 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007759
Jens Axboe05f3fb32019-12-09 11:22:50 -07007760 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007761 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007762 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007763 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007764
7765 /*
7766 * Don't allow io_uring instances to be registered. If UNIX
7767 * isn't enabled, then this causes a reference cycle and this
7768 * instance can never get freed. If UNIX is enabled we'll
7769 * handle it just fine, but there's still no point in allowing
7770 * a ring fd as it doesn't support regular read/write anyway.
7771 */
7772 if (file->f_op == &io_uring_fops) {
7773 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007774 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007775 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007776 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007777 }
7778
Jens Axboe05f3fb32019-12-09 11:22:50 -07007779 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007780 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01007781 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007782 return ret;
7783 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007784
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007785 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007786 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007787out_fput:
7788 for (i = 0; i < ctx->nr_user_files; i++) {
7789 file = io_file_from_index(ctx, i);
7790 if (file)
7791 fput(file);
7792 }
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007793 io_free_file_tables(&ctx->file_table);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007794 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007795out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007796 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007797 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007798 return ret;
7799}
7800
Jens Axboec3a31e62019-10-03 13:59:56 -06007801static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7802 int index)
7803{
7804#if defined(CONFIG_UNIX)
7805 struct sock *sock = ctx->ring_sock->sk;
7806 struct sk_buff_head *head = &sock->sk_receive_queue;
7807 struct sk_buff *skb;
7808
7809 /*
7810 * See if we can merge this file into an existing skb SCM_RIGHTS
7811 * file set. If there's no room, fall back to allocating a new skb
7812 * and filling it in.
7813 */
7814 spin_lock_irq(&head->lock);
7815 skb = skb_peek(head);
7816 if (skb) {
7817 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7818
7819 if (fpl->count < SCM_MAX_FD) {
7820 __skb_unlink(skb, head);
7821 spin_unlock_irq(&head->lock);
7822 fpl->fp[fpl->count] = get_file(file);
7823 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7824 fpl->count++;
7825 spin_lock_irq(&head->lock);
7826 __skb_queue_head(head, skb);
7827 } else {
7828 skb = NULL;
7829 }
7830 }
7831 spin_unlock_irq(&head->lock);
7832
7833 if (skb) {
7834 fput(file);
7835 return 0;
7836 }
7837
7838 return __io_sqe_files_scm(ctx, 1, index);
7839#else
7840 return 0;
7841#endif
7842}
7843
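/*
 * Park an old resource (plus its user tag) on @node's rsrc_list; the
 * actual put happens in io_rsrc_put_work() once the node's percpu refs
 * drain.
 */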
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007844static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
Pavel Begunkove7c78372021-04-01 15:43:45 +01007845 struct io_rsrc_node *node, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007846{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007847 struct io_rsrc_put *prsrc;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007848
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007849 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7850 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007851 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007852
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007853 prsrc->tag = *io_get_tag_slot(data, idx);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007854 prsrc->rsrc = rsrc;
Pavel Begunkove7c78372021-04-01 15:43:45 +01007855 list_add(&prsrc->list, &node->rsrc_list);
Hillf Dantona5318d32020-03-23 17:47:15 +08007856 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007857}
7858
7859static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007860 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007861 unsigned nr_args)
7862{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007863 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007864 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007865 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007866 struct io_fixed_file *file_slot;
7867 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007868 int fd, i, err = 0;
7869 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007870 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007871
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007872 if (!ctx->file_data)
7873 return -ENXIO;
7874 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06007875 return -EINVAL;
7876
Pavel Begunkov67973b92021-01-26 13:51:09 +00007877 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007878 u64 tag = 0;
7879
7880 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
7881 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007882 err = -EFAULT;
7883 break;
7884 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007885 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
7886 err = -EINVAL;
7887 break;
7888 }
noah4e0377a2021-01-26 15:23:28 -05007889 if (fd == IORING_REGISTER_FILES_SKIP)
7890 continue;
7891
Pavel Begunkov67973b92021-01-26 13:51:09 +00007892 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007893 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007894
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007895 if (file_slot->file_ptr) {
7896 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007897 err = io_queue_rsrc_removal(data, up->offset + done,
7898 ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08007899 if (err)
7900 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007901 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007902 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007903 }
7904 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007905 file = fget(fd);
7906 if (!file) {
7907 err = -EBADF;
7908 break;
7909 }
7910 /*
7911 * Don't allow io_uring instances to be registered. If
7912 * UNIX isn't enabled, then this causes a reference
7913 * cycle and this instance can never get freed. If UNIX
7914 * is enabled we'll handle it just fine, but there's
7915 * still no point in allowing a ring fd as it doesn't
7916 * support regular read/write anyway.
7917 */
7918 if (file->f_op == &io_uring_fops) {
7919 fput(file);
7920 err = -EBADF;
7921 break;
7922 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007923 *io_get_tag_slot(data, up->offset + done) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01007924 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007925 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007926 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007927 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007928 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007929 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007930 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007931 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007932 }
7933
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007934 if (needs_switch)
7935 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06007936 return done ? done : err;
7937}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007938
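/*
 * Create the io-wq instance used to offload work on behalf of @task. The
 * io-wq hash state is allocated lazily under uring_lock and cached in
 * ctx->hash_map so it can be shared; concurrency is capped at the SQ
 * ring size or 4 * num_online_cpus(), whichever is smaller.
 */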
Jens Axboe685fe7f2021-03-08 09:37:51 -07007939static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7940 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007941{
Jens Axboee9418942021-02-19 12:33:30 -07007942 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007943 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007944 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007945
Yang Yingliang362a9e62021-07-20 16:38:05 +08007946 mutex_lock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07007947 hash = ctx->hash_map;
7948 if (!hash) {
7949 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
Yang Yingliang362a9e62021-07-20 16:38:05 +08007950 if (!hash) {
7951 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07007952 return ERR_PTR(-ENOMEM);
Yang Yingliang362a9e62021-07-20 16:38:05 +08007953 }
Jens Axboee9418942021-02-19 12:33:30 -07007954 refcount_set(&hash->refs, 1);
7955 init_waitqueue_head(&hash->wait);
7956 ctx->hash_map = hash;
7957 }
Yang Yingliang362a9e62021-07-20 16:38:05 +08007958 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07007959
7960 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07007961 data.task = task;
Pavel Begunkovebc11b62021-08-09 13:04:05 +01007962 data.free_work = io_wq_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007963 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007964
Jens Axboed25e3a32021-02-16 11:41:41 -07007965 /* Do QD, or 4 * CPUS, whichever is smaller */
7966 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007967
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007968 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007969}
7970
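/*
 * Allocate the per-task io_uring state (task->io_uring): the inflight
 * request counters, the io-wq used for async offload, the xarray that
 * maps ring ctxs to tctx nodes, and the task_work list used to run
 * completions in task context.
 */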
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007971static int io_uring_alloc_task_context(struct task_struct *task,
7972 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007973{
7974 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007975 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007976
Pavel Begunkov09899b12021-06-14 02:36:22 +01007977 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06007978 if (unlikely(!tctx))
7979 return -ENOMEM;
7980
Jens Axboed8a6df12020-10-15 16:24:45 -06007981 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7982 if (unlikely(ret)) {
7983 kfree(tctx);
7984 return ret;
7985 }
7986
Jens Axboe685fe7f2021-03-08 09:37:51 -07007987 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007988 if (IS_ERR(tctx->io_wq)) {
7989 ret = PTR_ERR(tctx->io_wq);
7990 percpu_counter_destroy(&tctx->inflight);
7991 kfree(tctx);
7992 return ret;
7993 }
7994
Jens Axboe0f212202020-09-13 13:09:39 -06007995 xa_init(&tctx->xa);
7996 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06007997 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01007998 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007999 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00008000 spin_lock_init(&tctx->task_lock);
8001 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00008002 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06008003 return 0;
8004}
8005
8006void __io_uring_free(struct task_struct *tsk)
8007{
8008 struct io_uring_task *tctx = tsk->io_uring;
8009
8010 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008011 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01008012 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008013
Jens Axboed8a6df12020-10-15 16:24:45 -06008014 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008015 kfree(tctx);
8016 tsk->io_uring = NULL;
8017}
8018
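/*
 * Set up SQPOLL offload if requested: find or create the shared
 * io_sq_data, record the submitter's credentials and the idle timeout,
 * and spawn the SQ poll thread with create_io_thread() unless we
 * attached to one that is already running.
 */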
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008019static int io_sq_offload_create(struct io_ring_ctx *ctx,
8020 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008021{
8022 int ret;
8023
Jens Axboed25e3a32021-02-16 11:41:41 -07008024 /* Retain compatibility with failing for an invalid attach attempt */
8025 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8026 IORING_SETUP_ATTACH_WQ) {
8027 struct fd f;
8028
8029 f = fdget(p->wq_fd);
8030 if (!f.file)
8031 return -ENXIO;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008032 if (f.file->f_op != &io_uring_fops) {
8033 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008034 return -EINVAL;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008035 }
8036 fdput(f);
Jens Axboed25e3a32021-02-16 11:41:41 -07008037 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07008038 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07008039 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008040 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008041 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008042
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008043 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008044 if (IS_ERR(sqd)) {
8045 ret = PTR_ERR(sqd);
8046 goto err;
8047 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008048
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01008049 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06008050 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06008051 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8052 if (!ctx->sq_thread_idle)
8053 ctx->sq_thread_idle = HZ;
8054
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008055 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008056 list_add(&ctx->sqd_list, &sqd->ctx_list);
8057 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008058 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008059 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008060 io_sq_thread_unpark(sqd);
8061
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008062 if (ret < 0)
8063 goto err;
8064 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008065 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008066
Jens Axboe6c271ce2019-01-10 11:22:30 -07008067 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008068 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008069
Jens Axboe917257d2019-04-13 09:28:55 -06008070 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008071 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008072 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008073 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008074 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008075 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008076 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008077
8078 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008079 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008080 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8081 if (IS_ERR(tsk)) {
8082 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008083 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008084 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008085
Jens Axboe46fe18b2021-03-04 12:39:36 -07008086 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008087 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008088 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008089 if (ret)
8090 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008091 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8092 /* Can't have SQ_AFF without SQPOLL */
8093 ret = -EINVAL;
8094 goto err;
8095 }
8096
Jens Axboe2b188cc2019-01-07 10:46:33 -07008097 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008098err_sqpoll:
8099 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008100err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008101 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008102 return ret;
8103}
8104
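/*
 * Pinned memory accounting for registered buffers: when ctx->user is set
 * the pages are charged against user->locked_vm, bounded by
 * RLIMIT_MEMLOCK; when ctx->mm_account is set they are also reflected in
 * the mm's pinned_vm counter.
 */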
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008105static inline void __io_unaccount_mem(struct user_struct *user,
8106 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008107{
8108 atomic_long_sub(nr_pages, &user->locked_vm);
8109}
8110
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008111static inline int __io_account_mem(struct user_struct *user,
8112 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008113{
8114 unsigned long page_limit, cur_pages, new_pages;
8115
8116 /* Don't allow more pages than we can safely lock */
8117 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8118
8119 do {
8120 cur_pages = atomic_long_read(&user->locked_vm);
8121 new_pages = cur_pages + nr_pages;
8122 if (new_pages > page_limit)
8123 return -ENOMEM;
8124 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8125 new_pages) != cur_pages);
8126
8127 return 0;
8128}
8129
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008130static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008131{
Jens Axboe62e398b2021-02-21 16:19:37 -07008132 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008133 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008134
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008135 if (ctx->mm_account)
8136 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008137}
8138
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008139static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008140{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008141 int ret;
8142
Jens Axboe62e398b2021-02-21 16:19:37 -07008143 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008144 ret = __io_account_mem(ctx->user, nr_pages);
8145 if (ret)
8146 return ret;
8147 }
8148
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008149 if (ctx->mm_account)
8150 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008151
8152 return 0;
8153}
8154
Jens Axboe2b188cc2019-01-07 10:46:33 -07008155static void io_mem_free(void *ptr)
8156{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008157 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008158
Mark Rutland52e04ef2019-04-30 17:30:21 +01008159 if (!ptr)
8160 return;
8161
8162 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008163 if (put_page_testzero(page))
8164 free_compound_page(page);
8165}
8166
8167static void *io_mem_alloc(size_t size)
8168{
8169 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008170 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008171
8172 return (void *) __get_free_pages(gfp_flags, get_order(size));
8173}
8174
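/*
 * Compute the size of the shared rings allocation: the io_rings struct
 * (including cq_entries CQEs) followed, cache-line aligned on SMP, by
 * the array of sq_entries u32 SQ indices. *sq_offset receives the offset
 * of that index array; SIZE_MAX is returned on arithmetic overflow.
 */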
Hristo Venev75b28af2019-08-26 17:23:46 +00008175static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8176 size_t *sq_offset)
8177{
8178 struct io_rings *rings;
8179 size_t off, sq_array_size;
8180
8181 off = struct_size(rings, cqes, cq_entries);
8182 if (off == SIZE_MAX)
8183 return SIZE_MAX;
8184
8185#ifdef CONFIG_SMP
8186 off = ALIGN(off, SMP_CACHE_BYTES);
8187 if (off == 0)
8188 return SIZE_MAX;
8189#endif
8190
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008191 if (sq_offset)
8192 *sq_offset = off;
8193
Hristo Venev75b28af2019-08-26 17:23:46 +00008194 sq_array_size = array_size(sizeof(u32), sq_entries);
8195 if (sq_array_size == SIZE_MAX)
8196 return SIZE_MAX;
8197
8198 if (check_add_overflow(off, sq_array_size, &off))
8199 return SIZE_MAX;
8200
Hristo Venev75b28af2019-08-26 17:23:46 +00008201 return off;
8202}
8203
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008204static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008205{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008206 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008207 unsigned int i;
8208
Pavel Begunkov62248432021-04-28 13:11:29 +01008209 if (imu != ctx->dummy_ubuf) {
8210 for (i = 0; i < imu->nr_bvecs; i++)
8211 unpin_user_page(imu->bvec[i].bv_page);
8212 if (imu->acct_pages)
8213 io_unaccount_mem(ctx, imu->acct_pages);
8214 kvfree(imu);
8215 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008216 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008217}
8218
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008219static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8220{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008221 io_buffer_unmap(ctx, &prsrc->buf);
8222 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008223}
8224
8225static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008226{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008227 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008228
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008229 for (i = 0; i < ctx->nr_user_bufs; i++)
8230 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008231 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08008232 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07008233 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008234 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008235 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008236}
8237
Jens Axboeedafcce2019-01-09 09:16:05 -07008238static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8239{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008240 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008241
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008242 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07008243 return -ENXIO;
8244
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008245 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8246 if (!ret)
8247 __io_sqe_buffers_unregister(ctx);
8248 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008249}
8250
8251static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8252 void __user *arg, unsigned index)
8253{
8254 struct iovec __user *src;
8255
8256#ifdef CONFIG_COMPAT
8257 if (ctx->compat) {
8258 struct compat_iovec __user *ciovs;
8259 struct compat_iovec ciov;
8260
8261 ciovs = (struct compat_iovec __user *) arg;
8262 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8263 return -EFAULT;
8264
Jens Axboed55e5f52019-12-11 16:12:15 -07008265 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008266 dst->iov_len = ciov.iov_len;
8267 return 0;
8268 }
8269#endif
8270 src = (struct iovec __user *) arg;
8271 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8272 return -EFAULT;
8273 return 0;
8274}
8275
Jens Axboede293932020-09-17 16:19:16 -06008276/*
8277 * Not super efficient, but this is only done at registration time. And we do cache
8278 * the last compound head, so generally we'll only do a full search if we don't
8279 * match that one.
8280 *
8281 * We check if the given compound head page has already been accounted, to
8282 * avoid double accounting it. This allows us to account the full size of the
8283 * page, not just the constituent pages of a huge page.
8284 */
8285static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8286 int nr_pages, struct page *hpage)
8287{
8288 int i, j;
8289
8290 /* check current page array */
8291 for (i = 0; i < nr_pages; i++) {
8292 if (!PageCompound(pages[i]))
8293 continue;
8294 if (compound_head(pages[i]) == hpage)
8295 return true;
8296 }
8297
8298 /* check previously registered pages */
8299 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008300 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06008301
8302 for (j = 0; j < imu->nr_bvecs; j++) {
8303 if (!PageCompound(imu->bvec[j].bv_page))
8304 continue;
8305 if (compound_head(imu->bvec[j].bv_page) == hpage)
8306 return true;
8307 }
8308 }
8309
8310 return false;
8311}
8312
8313static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8314 int nr_pages, struct io_mapped_ubuf *imu,
8315 struct page **last_hpage)
8316{
8317 int i, ret;
8318
Pavel Begunkov216e5832021-05-29 12:01:02 +01008319 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06008320 for (i = 0; i < nr_pages; i++) {
8321 if (!PageCompound(pages[i])) {
8322 imu->acct_pages++;
8323 } else {
8324 struct page *hpage;
8325
8326 hpage = compound_head(pages[i]);
8327 if (hpage == *last_hpage)
8328 continue;
8329 *last_hpage = hpage;
8330 if (headpage_already_acct(ctx, pages, i, hpage))
8331 continue;
8332 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8333 }
8334 }
8335
8336 if (!imu->acct_pages)
8337 return 0;
8338
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008339 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008340 if (ret)
8341 imu->acct_pages = 0;
8342 return ret;
8343}
8344
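/*
 * Register one fixed buffer: pin the user pages with pin_user_pages(),
 * refuse file-backed mappings that aren't shmem or hugetlb, account the
 * pinned pages, and build the bvec table describing the buffer. An iovec
 * with a NULL base installs the shared dummy_ubuf sentinel instead.
 */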
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008345static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008346 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008347 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008348{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008349 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008350 struct vm_area_struct **vmas = NULL;
8351 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008352 unsigned long off, start, end, ubuf;
8353 size_t size;
8354 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008355
Pavel Begunkov62248432021-04-28 13:11:29 +01008356 if (!iov->iov_base) {
8357 *pimu = ctx->dummy_ubuf;
8358 return 0;
8359 }
8360
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008361 ubuf = (unsigned long) iov->iov_base;
8362 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8363 start = ubuf >> PAGE_SHIFT;
8364 nr_pages = end - start;
8365
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008366 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008367 ret = -ENOMEM;
8368
8369 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8370 if (!pages)
8371 goto done;
8372
8373 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8374 GFP_KERNEL);
8375 if (!vmas)
8376 goto done;
8377
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008378 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01008379 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008380 goto done;
8381
8382 ret = 0;
8383 mmap_read_lock(current->mm);
8384 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8385 pages, vmas);
8386 if (pret == nr_pages) {
8387 /* don't support file backed memory */
8388 for (i = 0; i < nr_pages; i++) {
8389 struct vm_area_struct *vma = vmas[i];
8390
Pavel Begunkov40dad762021-06-09 15:26:54 +01008391 if (vma_is_shmem(vma))
8392 continue;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008393 if (vma->vm_file &&
8394 !is_file_hugepages(vma->vm_file)) {
8395 ret = -EOPNOTSUPP;
8396 break;
8397 }
8398 }
8399 } else {
8400 ret = pret < 0 ? pret : -EFAULT;
8401 }
8402 mmap_read_unlock(current->mm);
8403 if (ret) {
8404 /*
8405 * if we did a partial map, or found file-backed vmas,
8406 * release any pages we did get
8407 */
8408 if (pret > 0)
8409 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008410 goto done;
8411 }
8412
8413 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8414 if (ret) {
8415 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008416 goto done;
8417 }
8418
8419 off = ubuf & ~PAGE_MASK;
8420 size = iov->iov_len;
8421 for (i = 0; i < nr_pages; i++) {
8422 size_t vec_len;
8423
8424 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8425 imu->bvec[i].bv_page = pages[i];
8426 imu->bvec[i].bv_len = vec_len;
8427 imu->bvec[i].bv_offset = off;
8428 off = 0;
8429 size -= vec_len;
8430 }
8431 /* store original address for later verification */
8432 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01008433 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008434 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008435 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008436 ret = 0;
8437done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008438 if (ret)
8439 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008440 kvfree(pages);
8441 kvfree(vmas);
8442 return ret;
8443}
8444
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008445static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008446{
Pavel Begunkov87094462021-04-11 01:46:36 +01008447 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8448 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008449}
8450
8451static int io_buffer_validate(struct iovec *iov)
8452{
Pavel Begunkov50e96982021-03-24 22:59:01 +00008453 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8454
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008455 /*
8456 * Don't impose further limits on the size and buffer
8457 * constraints here, we'll -EINVAL later when IO is
8458 * submitted if they are wrong.
8459 */
Pavel Begunkov62248432021-04-28 13:11:29 +01008460 if (!iov->iov_base)
8461 return iov->iov_len ? -EFAULT : 0;
8462 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008463 return -EFAULT;
8464
8465 /* arbitrary limit, but we need something */
8466 if (iov->iov_len > SZ_1G)
8467 return -EFAULT;
8468
Pavel Begunkov50e96982021-03-24 22:59:01 +00008469 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8470 return -EOVERFLOW;
8471
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008472 return 0;
8473}
8474
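/*
 * Register up to IORING_MAX_REG_BUFFERS iovecs as fixed buffers. Each
 * iovec is copied from userspace (with compat handling), validated,
 * pinned and mapped; the optional per-buffer tags are stored in the rsrc
 * data for the rsrc put path to consume.
 */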
8475static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008476 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008477{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008478 struct page *last_hpage = NULL;
8479 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008480 int i, ret;
8481 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008482
Pavel Begunkov87094462021-04-11 01:46:36 +01008483 if (ctx->user_bufs)
8484 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01008485 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01008486 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008487 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008488 if (ret)
8489 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008490 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
8491 if (ret)
8492 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008493 ret = io_buffers_map_alloc(ctx, nr_args);
8494 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08008495 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008496 return ret;
8497 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008498
Pavel Begunkov87094462021-04-11 01:46:36 +01008499 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07008500 ret = io_copy_iov(ctx, &iov, arg, i);
8501 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008502 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008503 ret = io_buffer_validate(&iov);
8504 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008505 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008506 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008507 ret = -EINVAL;
8508 break;
8509 }
Jens Axboeedafcce2019-01-09 09:16:05 -07008510
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008511 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
8512 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008513 if (ret)
8514 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008515 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008516
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008517 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008518
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008519 ctx->buf_data = data;
8520 if (ret)
8521 __io_sqe_buffers_unregister(ctx);
8522 else
8523 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07008524 return ret;
8525}
8526
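/*
 * Update already registered buffers in place: old non-dummy buffers are
 * queued for deferred unmap via io_queue_rsrc_removal(), the replacement
 * iovecs are pinned and installed, and the rsrc node is switched if
 * anything was replaced.
 */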
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008527static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
8528 struct io_uring_rsrc_update2 *up,
8529 unsigned int nr_args)
8530{
8531 u64 __user *tags = u64_to_user_ptr(up->tags);
8532 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008533 struct page *last_hpage = NULL;
8534 bool needs_switch = false;
8535 __u32 done;
8536 int i, err;
8537
8538 if (!ctx->buf_data)
8539 return -ENXIO;
8540 if (up->offset + nr_args > ctx->nr_user_bufs)
8541 return -EINVAL;
8542
8543 for (done = 0; done < nr_args; done++) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008544 struct io_mapped_ubuf *imu;
8545 int offset = up->offset + done;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008546 u64 tag = 0;
8547
8548 err = io_copy_iov(ctx, &iov, iovs, done);
8549 if (err)
8550 break;
8551 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
8552 err = -EFAULT;
8553 break;
8554 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008555 err = io_buffer_validate(&iov);
8556 if (err)
8557 break;
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008558 if (!iov.iov_base && tag) {
8559 err = -EINVAL;
8560 break;
8561 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008562 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
8563 if (err)
8564 break;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008565
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008566 i = array_index_nospec(offset, ctx->nr_user_bufs);
Pavel Begunkov62248432021-04-28 13:11:29 +01008567 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008568 err = io_queue_rsrc_removal(ctx->buf_data, offset,
8569 ctx->rsrc_node, ctx->user_bufs[i]);
8570 if (unlikely(err)) {
8571 io_buffer_unmap(ctx, &imu);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008572 break;
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008573 }
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008574 ctx->user_bufs[i] = NULL;
8575 needs_switch = true;
8576 }
8577
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008578 ctx->user_bufs[i] = imu;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008579 *io_get_tag_slot(ctx->buf_data, offset) = tag;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008580 }
8581
8582 if (needs_switch)
8583 io_rsrc_node_switch(ctx, ctx->buf_data);
8584 return done ? done : err;
8585}
8586
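/*
 * Register an eventfd that is signalled when completions are posted to
 * the CQ ring. Only one eventfd may be registered at a time; the fd is
 * copied from userspace and resolved with eventfd_ctx_fdget().
 */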
Jens Axboe9b402842019-04-11 11:45:41 -06008587static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8588{
8589 __s32 __user *fds = arg;
8590 int fd;
8591
8592 if (ctx->cq_ev_fd)
8593 return -EBUSY;
8594
8595 if (copy_from_user(&fd, fds, sizeof(*fds)))
8596 return -EFAULT;
8597
8598 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8599 if (IS_ERR(ctx->cq_ev_fd)) {
8600 int ret = PTR_ERR(ctx->cq_ev_fd);
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01008601
Jens Axboe9b402842019-04-11 11:45:41 -06008602 ctx->cq_ev_fd = NULL;
8603 return ret;
8604 }
8605
8606 return 0;
8607}
8608
8609static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8610{
8611 if (ctx->cq_ev_fd) {
8612 eventfd_ctx_put(ctx->cq_ev_fd);
8613 ctx->cq_ev_fd = NULL;
8614 return 0;
8615 }
8616
8617 return -ENXIO;
8618}
8619
Jens Axboe5a2e7452020-02-23 16:23:11 -07008620static void io_destroy_buffers(struct io_ring_ctx *ctx)
8621{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008622 struct io_buffer *buf;
8623 unsigned long index;
8624
8625 xa_for_each(&ctx->io_buffers, index, buf)
8626 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008627}
8628
Pavel Begunkov72558342021-08-09 20:18:09 +01008629static void io_req_cache_free(struct list_head *list)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008630{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008631 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008632
Pavel Begunkovbb943b82021-08-09 20:18:10 +01008633 list_for_each_entry_safe(req, nxt, list, inflight_entry) {
8634 list_del(&req->inflight_entry);
Jens Axboe1b4c3512021-02-10 00:03:19 +00008635 kmem_cache_free(req_cachep, req);
8636 }
8637}
8638
Jens Axboe4010fec2021-02-27 15:04:18 -07008639static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008640{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01008641 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008642
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008643 mutex_lock(&ctx->uring_lock);
8644
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01008645 if (state->free_reqs) {
8646 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
8647 state->free_reqs = 0;
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008648 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008649
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01008650 io_flush_cached_locked_reqs(ctx, state);
8651 io_req_cache_free(&state->free_list);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008652 mutex_unlock(&ctx->uring_lock);
8653}
8654
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008655static void io_wait_rsrc_data(struct io_rsrc_data *data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008656{
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008657 if (data && !atomic_dec_and_test(&data->refs))
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008658 wait_for_completion(&data->done);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008659}
8660
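/*
 * Final teardown of a ring ctx, run once all references are gone: stop
 * SQPOLL, wait for outstanding rsrc data refs, unregister files, buffers
 * and the eventfd, drop the rsrc nodes, release the ring socket and the
 * ring/SQE memory, and free the ctx itself.
 */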
Jens Axboe2b188cc2019-01-07 10:46:33 -07008661static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8662{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008663 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008664
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008665 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008666 mmdrop(ctx->mm_account);
8667 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008668 }
Jens Axboedef596e2019-01-09 08:59:42 -07008669
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008670 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
8671 io_wait_rsrc_data(ctx->buf_data);
8672 io_wait_rsrc_data(ctx->file_data);
8673
Hao Xu8bad28d2021-02-19 17:19:36 +08008674 mutex_lock(&ctx->uring_lock);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008675 if (ctx->buf_data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008676 __io_sqe_buffers_unregister(ctx);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008677 if (ctx->file_data)
Pavel Begunkov08480402021-04-13 02:58:38 +01008678 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01008679 if (ctx->rings)
8680 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08008681 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008682 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008683 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01008684 if (ctx->sq_creds)
8685 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07008686
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008687 /* there are no registered resources left, nobody uses it */
8688 if (ctx->rsrc_node)
8689 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008690 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008691 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008692 flush_delayed_work(&ctx->rsrc_put_work);
8693
8694 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8695 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008696
8697#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008698 if (ctx->ring_sock) {
8699 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008700 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008701 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008702#endif
8703
Hristo Venev75b28af2019-08-26 17:23:46 +00008704 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008705 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008706
8707 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008708 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008709 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008710 if (ctx->hash_map)
8711 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008712 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01008713 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008714 kfree(ctx);
8715}
8716
8717static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8718{
8719 struct io_ring_ctx *ctx = file->private_data;
8720 __poll_t mask = 0;
8721
Pavel Begunkov311997b2021-06-14 23:37:28 +01008722 poll_wait(file, &ctx->poll_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008723 /*
8724 * synchronizes with barrier from wq_has_sleeper call in
8725 * io_commit_cqring
8726 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008727 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008728 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008729 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008730
8731 /*
8732 * Don't flush cqring overflow list here, just do a simple check.
8733 * Otherwise there could possibly be an ABBA deadlock:
8734 *      CPU0                    CPU1
8735 *      ----                    ----
8736 * lock(&ctx->uring_lock);
8737 *                              lock(&ep->mtx);
8738 *                              lock(&ctx->uring_lock);
8739 * lock(&ep->mtx);
8740 *
8741 * Users may get EPOLLIN while seeing nothing in the cqring, which
8742 * pushes them to do the flush.
8743 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01008744 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008745 mask |= EPOLLIN | EPOLLRDNORM;
8746
8747 return mask;
8748}
8749
8750static int io_uring_fasync(int fd, struct file *file, int on)
8751{
8752 struct io_ring_ctx *ctx = file->private_data;
8753
8754 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8755}
8756
Yejune Deng0bead8c2020-12-24 11:02:20 +08008757static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008758{
Jens Axboe4379bf82021-02-15 13:40:22 -07008759 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008760
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008761 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008762 if (creds) {
8763 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008764 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008765 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008766
8767 return -EINVAL;
8768}
8769
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008770struct io_tctx_exit {
8771 struct callback_head task_work;
8772 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008773 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008774};
8775
8776static void io_tctx_exit_cb(struct callback_head *cb)
8777{
8778 struct io_uring_task *tctx = current->io_uring;
8779 struct io_tctx_exit *work;
8780
8781 work = container_of(cb, struct io_tctx_exit, task_work);
8782 /*
8783 * When @in_idle, we're in cancellation and it's racy to remove the
8784 * node. It'll be removed by the end of cancellation, just ignore it.
8785 */
8786 if (!atomic_read(&tctx->in_idle))
Pavel Begunkoveef51da2021-06-14 02:36:15 +01008787 io_uring_del_tctx_node((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008788 complete(&work->completion);
8789}
8790
Pavel Begunkov28090c12021-04-25 23:34:45 +01008791static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8792{
8793 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8794
8795 return req->ctx == data;
8796}
8797
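/*
 * Deferred ring teardown, queued from io_ring_ctx_wait_and_kill(). Keeps
 * cancelling requests (and reaping iopoll completions) until the ctx
 * refs drop, then walks tctx_list and has each task remove its node via
 * task_work before the ctx is finally freed.
 */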
Jens Axboe85faa7b2020-04-09 18:14:00 -06008798static void io_ring_exit_work(struct work_struct *work)
8799{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008800 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008801 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkov58d3be22021-08-09 13:04:17 +01008802 unsigned long interval = HZ / 20;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008803 struct io_tctx_exit exit;
8804 struct io_tctx_node *node;
8805 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008806
Jens Axboe56952e92020-06-17 15:00:04 -06008807 /*
8808 * If we're doing polled IO and end up having requests being
8809 * submitted async (out-of-line), then completions can come in while
8810 * we're waiting for refs to drop. We need to reap these manually,
8811 * as nobody else will be looking for them.
8812 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008813 do {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008814 io_uring_try_cancel_requests(ctx, NULL, true);
Pavel Begunkov28090c12021-04-25 23:34:45 +01008815 if (ctx->sq_data) {
8816 struct io_sq_data *sqd = ctx->sq_data;
8817 struct task_struct *tsk;
8818
8819 io_sq_thread_park(sqd);
8820 tsk = sqd->thread;
8821 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
8822 io_wq_cancel_cb(tsk->io_uring->io_wq,
8823 io_cancel_ctx_cb, ctx, true);
8824 io_sq_thread_unpark(sqd);
8825 }
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008826
Pavel Begunkov58d3be22021-08-09 13:04:17 +01008827 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
8828 /* there is little hope left, don't run it too often */
8829 interval = HZ * 60;
8830 }
8831 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008832
Pavel Begunkov7f006512021-04-14 13:38:34 +01008833 init_completion(&exit.completion);
8834 init_task_work(&exit.task_work, io_tctx_exit_cb);
8835 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01008836 /*
8837 * Some may still use the context even when all refs and requests have been put,
8838 * and they are free to do so while still holding uring_lock or
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01008839 * completion_lock, see io_req_task_submit(). Apart from other work,
Pavel Begunkov89b50662021-04-01 15:43:50 +01008840 * this lock/unlock section also waits for them to finish.
8841 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008842 mutex_lock(&ctx->uring_lock);
8843 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008844 WARN_ON_ONCE(time_after(jiffies, timeout));
8845
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008846 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8847 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01008848 /* don't spin on a single task if cancellation failed */
8849 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008850 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8851 if (WARN_ON_ONCE(ret))
8852 continue;
8853 wake_up_process(node->task);
8854
8855 mutex_unlock(&ctx->uring_lock);
8856 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008857 mutex_lock(&ctx->uring_lock);
8858 }
8859 mutex_unlock(&ctx->uring_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008860 spin_lock(&ctx->completion_lock);
8861 spin_unlock(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008862
Jens Axboe85faa7b2020-04-09 18:14:00 -06008863 io_ring_ctx_free(ctx);
8864}
8865
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008866/* Returns true if we found and killed one or more timeouts */
8867static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008868 bool cancel_all)
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008869{
8870 struct io_kiocb *req, *tmp;
8871 int canceled = 0;
8872
Jens Axboe79ebeae2021-08-10 15:18:27 -06008873 spin_lock(&ctx->completion_lock);
8874 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008875 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008876 if (io_match_task(req, tsk, cancel_all)) {
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008877 io_kill_timeout(req, -ECANCELED);
8878 canceled++;
8879 }
8880 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06008881 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov51520422021-03-29 11:39:29 +01008882 if (canceled != 0)
8883 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008884 spin_unlock(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008885 if (canceled != 0)
8886 io_cqring_ev_posted(ctx);
8887 return canceled != 0;
8888}
8889
Jens Axboe2b188cc2019-01-07 10:46:33 -07008890static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8891{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008892 unsigned long index;
8893 struct creds *creds;
8894
Jens Axboe2b188cc2019-01-07 10:46:33 -07008895 mutex_lock(&ctx->uring_lock);
8896 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00008897 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00008898 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008899 xa_for_each(&ctx->personalities, index, creds)
8900 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008901 mutex_unlock(&ctx->uring_lock);
8902
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008903 io_kill_timeouts(ctx, NULL, true);
8904 io_poll_remove_all(ctx, NULL, true);
Jens Axboe561fb042019-10-24 07:25:42 -06008905
Jens Axboe15dff282019-11-13 09:09:23 -07008906 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008907 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008908
Jens Axboe85faa7b2020-04-09 18:14:00 -06008909 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008910 /*
8911 * Use system_unbound_wq to avoid spawning tons of event kworkers
8912 * if we're exiting a ton of rings at the same time. It just adds
8913 * noise and overhead, there's no discernible change in runtime
8914 * over using system_wq.
8915 */
8916 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008917}
8918
8919static int io_uring_release(struct inode *inode, struct file *file)
8920{
8921 struct io_ring_ctx *ctx = file->private_data;
8922
8923 file->private_data = NULL;
8924 io_ring_ctx_wait_and_kill(ctx);
8925 return 0;
8926}
8927
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008928struct io_task_cancel {
8929 struct task_struct *task;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008930 bool all;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008931};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008932
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008933static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008934{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008935 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008936 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008937 bool ret;
8938
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008939 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008940 struct io_ring_ctx *ctx = req->ctx;
8941
8942 /* protect against races with linked timeouts */
Jens Axboe79ebeae2021-08-10 15:18:27 -06008943 spin_lock(&ctx->completion_lock);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008944 ret = io_match_task(req, cancel->task, cancel->all);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008945 spin_unlock(&ctx->completion_lock);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008946 } else {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008947 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008948 }
8949 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008950}
8951
Pavel Begunkove1915f72021-03-11 23:29:35 +00008952static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008953 struct task_struct *task, bool cancel_all)
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008954{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008955 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008956 LIST_HEAD(list);
8957
Jens Axboe79ebeae2021-08-10 15:18:27 -06008958 spin_lock(&ctx->completion_lock);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008959 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008960 if (io_match_task(de->req, task, cancel_all)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008961 list_cut_position(&list, &ctx->defer_list, &de->list);
8962 break;
8963 }
8964 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06008965 spin_unlock(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008966 if (list_empty(&list))
8967 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008968
8969 while (!list_empty(&list)) {
8970 de = list_first_entry(&list, struct io_defer_entry, list);
8971 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008972 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008973 kfree(de);
8974 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008975 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008976}
8977
Pavel Begunkov1b007642021-03-06 11:02:17 +00008978static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8979{
8980 struct io_tctx_node *node;
8981 enum io_wq_cancel cret;
8982 bool ret = false;
8983
8984 mutex_lock(&ctx->uring_lock);
8985 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8986 struct io_uring_task *tctx = node->task->io_uring;
8987
8988 /*
8989 * io_wq will stay alive while we hold uring_lock, because it's
8990 * killed after ctx nodes, which requires taking the lock.
8991 */
8992 if (!tctx || !tctx->io_wq)
8993 continue;
8994 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8995 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8996 }
8997 mutex_unlock(&ctx->uring_lock);
8998
8999 return ret;
9000}
9001
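/*
 * Cancel requests belonging to @task on this ring (or, when @task is
 * NULL, everything reachable through the ring's io-wqs). Loops
 * cancelling io-wq work, deferred requests, poll requests and timeouts,
 * and reaping iopoll completions, until a pass makes no further
 * progress.
 */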
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009002static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9003 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009004 bool cancel_all)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009005{
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009006 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00009007 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009008
9009 while (1) {
9010 enum io_wq_cancel cret;
9011 bool ret = false;
9012
Pavel Begunkov1b007642021-03-06 11:02:17 +00009013 if (!task) {
9014 ret |= io_uring_try_cancel_iowq(ctx);
9015 } else if (tctx && tctx->io_wq) {
9016 /*
9017 * Cancels requests of all rings, not only @ctx, but
9018 * it's fine as the task is in exit/exec.
9019 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009020 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009021 &cancel, true);
9022 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9023 }
9024
9025 /* SQPOLL thread does its own polling */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009026 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
Jens Axboed052d1d2021-03-11 10:49:20 -07009027 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009028 while (!list_empty_careful(&ctx->iopoll_list)) {
9029 io_iopoll_try_reap_events(ctx);
9030 ret = true;
9031 }
9032 }
9033
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009034 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9035 ret |= io_poll_remove_all(ctx, task, cancel_all);
9036 ret |= io_kill_timeouts(ctx, task, cancel_all);
Pavel Begunkove5dc4802021-06-26 21:40:46 +01009037 if (task)
9038 ret |= io_run_task_work();
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009039 if (!ret)
9040 break;
9041 cond_resched();
9042 }
9043}
9044
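/*
 * Record that the current task uses @ctx: allocate the task context on
 * first use and link an io_tctx_node into both the task's xarray and the
 * ctx's tctx_list, so cancellation can find the mapping from either side.
 */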
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009045static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009046{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009047 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009048 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00009049 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009050
9051 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009052 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009053 if (unlikely(ret))
9054 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009055 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06009056 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009057 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9058 node = kmalloc(sizeof(*node), GFP_KERNEL);
9059 if (!node)
9060 return -ENOMEM;
9061 node->ctx = ctx;
9062 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06009063
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009064 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9065 node, GFP_KERNEL));
9066 if (ret) {
9067 kfree(node);
9068 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06009069 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009070
9071 mutex_lock(&ctx->uring_lock);
9072 list_add(&node->ctx_node, &ctx->tctx_list);
9073 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009074 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009075 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06009076 return 0;
9077}
9078
9079/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009080 * Note that this task has used io_uring. We use it for cancellation purposes.
9081 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009082static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009083{
9084 struct io_uring_task *tctx = current->io_uring;
9085
9086 if (likely(tctx && tctx->last == ctx))
9087 return 0;
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009088 return __io_uring_add_tctx_node(ctx);
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009089}
9090
9091/*
Jens Axboe0f212202020-09-13 13:09:39 -06009092 * Remove this io_uring_file -> task mapping.
9093 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009094static void io_uring_del_tctx_node(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06009095{
9096 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009097 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00009098
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00009099 if (!tctx)
9100 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009101 node = xa_erase(&tctx->xa, index);
9102 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00009103 return;
Jens Axboe0f212202020-09-13 13:09:39 -06009104
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009105 WARN_ON_ONCE(current != node->task);
9106 WARN_ON_ONCE(list_empty(&node->ctx_node));
9107
9108 mutex_lock(&node->ctx->uring_lock);
9109 list_del(&node->ctx_node);
9110 mutex_unlock(&node->ctx->uring_lock);
9111
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009112 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009113 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009114 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06009115}
9116
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009117static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009118{
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009119 struct io_wq *wq = tctx->io_wq;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009120 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009121 unsigned long index;
9122
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009123 xa_for_each(&tctx->xa, index, node)
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009124 io_uring_del_tctx_node(index);
Marco Elverb16ef422021-05-27 11:25:48 +02009125 if (wq) {
9126 /*
9127 * Must be after io_uring_del_tctx_node() (removes nodes under
9128 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9129 */
9130 tctx->io_wq = NULL;
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009131 io_wq_put_and_exit(wq);
Marco Elverb16ef422021-05-27 11:25:48 +02009132 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009133}
9134
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009135static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009136{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009137 if (tracked)
9138 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009139 return percpu_counter_sum(&tctx->inflight);
9140}
9141
Pavel Begunkov09899b12021-06-14 02:36:22 +01009142static void io_uring_drop_tctx_refs(struct task_struct *task)
9143{
9144 struct io_uring_task *tctx = task->io_uring;
9145 unsigned int refs = tctx->cached_refs;
9146
Pavel Begunkove9dbe222021-08-09 13:04:20 +01009147 if (refs) {
9148 tctx->cached_refs = 0;
9149 percpu_counter_sub(&tctx->inflight, refs);
9150 put_task_struct_many(task, refs);
9151 }
Pavel Begunkov09899b12021-06-14 02:36:22 +01009152}
9153
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009154/*
9155 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9156 * requests. @sqd should be non-NULL IFF it's an SQPOLL thread cancellation.
9157 */
9158static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009159{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009160 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov734551d2021-04-18 14:52:09 +01009161 struct io_ring_ctx *ctx;
Jens Axboefdaf0832020-10-30 09:37:30 -06009162 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009163 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009164
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009165 WARN_ON_ONCE(sqd && sqd->thread != current);
9166
Palash Oswal6d042ff2021-04-27 18:21:49 +05309167 if (!current->io_uring)
9168 return;
Pavel Begunkov17a91052021-05-23 15:48:39 +01009169 if (tctx->io_wq)
9170 io_wq_exit_start(tctx->io_wq);
9171
Jens Axboefdaf0832020-10-30 09:37:30 -06009172 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -06009173 do {
Pavel Begunkove9dbe222021-08-09 13:04:20 +01009174 io_uring_drop_tctx_refs(current);
Jens Axboe0f212202020-09-13 13:09:39 -06009175 /* read completions before cancelations */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009176 inflight = tctx_inflight(tctx, !cancel_all);
Jens Axboed8a6df12020-10-15 16:24:45 -06009177 if (!inflight)
9178 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009179
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009180 if (!sqd) {
9181 struct io_tctx_node *node;
9182 unsigned long index;
9183
9184 xa_for_each(&tctx->xa, index, node) {
9185 /* sqpoll task will cancel all its requests */
9186 if (node->ctx->sq_data)
9187 continue;
9188 io_uring_try_cancel_requests(node->ctx, current,
9189 cancel_all);
9190 }
9191 } else {
9192 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9193 io_uring_try_cancel_requests(ctx, current,
9194 cancel_all);
9195 }
9196
9197 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
Pavel Begunkove9dbe222021-08-09 13:04:20 +01009198 io_uring_drop_tctx_refs(current);
Jens Axboe0f212202020-09-13 13:09:39 -06009199 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009200 * If we've seen completions, retry without waiting. This
9201 * avoids a race where a completion comes in before we did
9202 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009203 */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009204 if (inflight == tctx_inflight(tctx, !cancel_all))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009205 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009206 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009207 } while (1);
Jens Axboefdaf0832020-10-30 09:37:30 -06009208 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009209
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009210 io_uring_clean_tctx(tctx);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009211 if (cancel_all) {
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009212 /* for exec all current's requests should be gone, kill tctx */
9213 __io_uring_free(current);
9214 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009215}
9216
Hao Xuf552a272021-08-12 12:14:35 +08009217void __io_uring_cancel(bool cancel_all)
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009218{
Hao Xuf552a272021-08-12 12:14:35 +08009219 io_uring_cancel_generic(cancel_all, NULL);
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009220}
9221
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009222static void *io_uring_validate_mmap_request(struct file *file,
9223 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009224{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009225 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009226 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009227 struct page *page;
9228 void *ptr;
9229
9230 switch (offset) {
9231 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009232 case IORING_OFF_CQ_RING:
9233 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009234 break;
9235 case IORING_OFF_SQES:
9236 ptr = ctx->sq_sqes;
9237 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009238 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009239 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009240 }
9241
9242 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009243 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009244 return ERR_PTR(-EINVAL);
9245
9246 return ptr;
9247}
9248
9249#ifdef CONFIG_MMU
9250
9251static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9252{
9253 size_t sz = vma->vm_end - vma->vm_start;
9254 unsigned long pfn;
9255 void *ptr;
9256
9257 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9258 if (IS_ERR(ptr))
9259 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009260
9261 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9262 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9263}
9264
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009265#else /* !CONFIG_MMU */
9266
9267static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9268{
9269 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9270}
9271
9272static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9273{
9274 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9275}
9276
9277static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9278 unsigned long addr, unsigned long len,
9279 unsigned long pgoff, unsigned long flags)
9280{
9281 void *ptr;
9282
9283 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9284 if (IS_ERR(ptr))
9285 return PTR_ERR(ptr);
9286
9287 return (unsigned long) ptr;
9288}
9289
9290#endif /* !CONFIG_MMU */
9291
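/*
 * Userspace view of the offsets handled above (illustrative sketch, not part
 * of the kernel sources): after io_uring_setup(), the application mmaps the
 * rings and the SQE array against the ring fd using these fixed offsets:
 *
 *	sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	sqes    = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * Since this kernel advertises IORING_FEAT_SINGLE_MMAP, the CQ ring shares
 * the IORING_OFF_SQ_RING mapping and a separate IORING_OFF_CQ_RING mmap is
 * optional. sq_ring_sz and sqes_sz are derived from the io_uring_params
 * offsets returned by io_uring_setup().
 */
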
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009292static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009293{
9294 DEFINE_WAIT(wait);
9295
9296 do {
9297 if (!io_sqring_full(ctx))
9298 break;
Jens Axboe90554202020-09-03 12:12:41 -06009299 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9300
9301 if (!io_sqring_full(ctx))
9302 break;
Jens Axboe90554202020-09-03 12:12:41 -06009303 schedule();
9304 } while (!signal_pending(current));
9305
9306 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009307 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009308}
9309
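/*
 * Matching userspace call (illustrative sketch, not part of the kernel
 * sources): with IORING_SETUP_SQPOLL, an application that finds the SQ ring
 * full can ask io_uring_enter() to block in io_sqpoll_wait_sq() above until
 * the poll thread has consumed entries, instead of busy-retrying:
 *
 *	syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *		IORING_ENTER_SQ_WAIT, NULL, 0);
 */
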
Hao Xuc73ebb62020-11-03 10:54:37 +08009310static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9311 struct __kernel_timespec __user **ts,
9312 const sigset_t __user **sig)
9313{
9314 struct io_uring_getevents_arg arg;
9315
9316 /*
9317 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9318 * is just a pointer to the sigset_t.
9319 */
9320 if (!(flags & IORING_ENTER_EXT_ARG)) {
9321 *sig = (const sigset_t __user *) argp;
9322 *ts = NULL;
9323 return 0;
9324 }
9325
9326 /*
9327 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9328 * timespec and sigset_t pointers if good.
9329 */
9330 if (*argsz != sizeof(arg))
9331 return -EINVAL;
9332 if (copy_from_user(&arg, argp, sizeof(arg)))
9333 return -EFAULT;
9334 *sig = u64_to_user_ptr(arg.sigmask);
9335 *argsz = arg.sigmask_sz;
9336 *ts = u64_to_user_ptr(arg.ts);
9337 return 0;
9338}
9339
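/*
 * Illustrative userspace sketch (not part of the kernel sources) of the
 * EXT_ARG convention parsed above: with IORING_ENTER_EXT_ARG set, argp
 * points at an io_uring_getevents_arg rather than a bare sigset_t, and
 * argsz must be sizeof(arg) rather than the sigset size:
 *
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(unsigned long)&mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(unsigned long)&timeout,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */
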
Jens Axboe2b188cc2019-01-07 10:46:33 -07009340SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009341 u32, min_complete, u32, flags, const void __user *, argp,
9342 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009343{
9344 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009345 int submitted = 0;
9346 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009347 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009348
Jens Axboe4c6e2772020-07-01 11:29:10 -06009349 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009350
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009351 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9352 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009353 return -EINVAL;
9354
9355 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009356 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009357 return -EBADF;
9358
9359 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009360 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009361 goto out_fput;
9362
9363 ret = -ENXIO;
9364 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009365 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009366 goto out_fput;
9367
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009368 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009369 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009370 goto out;
9371
Jens Axboe6c271ce2019-01-10 11:22:30 -07009372 /*
9373 * For SQ polling, the thread will do all submissions and completions.
9374 * Just return the requested submit count, and wake the thread if
9375 * we were asked to.
9376 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009377 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009378 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov90f67362021-08-09 20:18:12 +01009379 io_cqring_overflow_flush(ctx);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009380
Jens Axboe21f96522021-08-14 09:04:40 -06009381 if (unlikely(ctx->sq_data->thread == NULL)) {
9382 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009383 goto out;
Jens Axboe21f96522021-08-14 09:04:40 -06009384 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009385 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009386 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009387 if (flags & IORING_ENTER_SQ_WAIT) {
9388 ret = io_sqpoll_wait_sq(ctx);
9389 if (ret)
9390 goto out;
9391 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009392 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009393 } else if (to_submit) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009394 ret = io_uring_add_tctx_node(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009395 if (unlikely(ret))
9396 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009397 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009398 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009399 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009400
9401 if (submitted != to_submit)
9402 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009403 }
9404 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009405 const sigset_t __user *sig;
9406 struct __kernel_timespec __user *ts;
9407
9408 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9409 if (unlikely(ret))
9410 goto out;
9411
Jens Axboe2b188cc2019-01-07 10:46:33 -07009412 min_complete = min(min_complete, ctx->cq_entries);
9413
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009414 /*
9415		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
9416		 * applications don't need to poll for completion events themselves;
9417		 * they can rely on io_sq_thread to do the polling, which reduces
9418		 * CPU usage and uring_lock contention.
9419 */
9420 if (ctx->flags & IORING_SETUP_IOPOLL &&
9421 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009422 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009423 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009424 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009425 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009426 }
9427
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009428out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009429 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009430out_fput:
9431 fdput(f);
9432 return submitted ? submitted : ret;
9433}
9434
Tobias Klauserbebdb652020-02-26 18:38:32 +01009435#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009436static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9437 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009438{
Jens Axboe87ce9552020-01-30 08:25:34 -07009439 struct user_namespace *uns = seq_user_ns(m);
9440 struct group_info *gi;
9441 kernel_cap_t cap;
9442 unsigned __capi;
9443 int g;
9444
9445 seq_printf(m, "%5d\n", id);
9446 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9447 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9448 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9449 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9450 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9451 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9452 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9453 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9454 seq_puts(m, "\n\tGroups:\t");
9455 gi = cred->group_info;
9456 for (g = 0; g < gi->ngroups; g++) {
9457 seq_put_decimal_ull(m, g ? " " : "",
9458 from_kgid_munged(uns, gi->gid[g]));
9459 }
9460 seq_puts(m, "\n\tCapEff:\t");
9461 cap = cred->cap_effective;
9462 CAP_FOR_EACH_U32(__capi)
9463 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9464 seq_putc(m, '\n');
9465 return 0;
9466}
9467
9468static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9469{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009470 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009471 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009472 int i;
9473
Jens Axboefad8e0d2020-09-28 08:57:48 -06009474 /*
9475 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9476 * since fdinfo case grabs it in the opposite direction of normal use
9477 * cases. If we fail to get the lock, we just don't iterate any
9478 * structures that could be going away outside the io_uring mutex.
9479 */
9480 has_lock = mutex_trylock(&ctx->uring_lock);
9481
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009482 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009483 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009484 if (!sq->thread)
9485 sq = NULL;
9486 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009487
9488 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9489 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009490 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009491 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07009492 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009493
Jens Axboe87ce9552020-01-30 08:25:34 -07009494 if (f)
9495 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9496 else
9497 seq_printf(m, "%5u: <none>\n", i);
9498 }
9499 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009500 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009501 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +01009502 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -07009503
Pavel Begunkov4751f532021-04-01 15:43:55 +01009504 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -07009505 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009506 if (has_lock && !xa_empty(&ctx->personalities)) {
9507 unsigned long index;
9508 const struct cred *cred;
9509
Jens Axboe87ce9552020-01-30 08:25:34 -07009510 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009511 xa_for_each(&ctx->personalities, index, cred)
9512 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009513 }
Jens Axboed7718a92020-02-14 22:23:12 -07009514 seq_printf(m, "PollList:\n");
Jens Axboe79ebeae2021-08-10 15:18:27 -06009515 spin_lock(&ctx->completion_lock);
Jens Axboed7718a92020-02-14 22:23:12 -07009516 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9517 struct hlist_head *list = &ctx->cancel_hash[i];
9518 struct io_kiocb *req;
9519
9520 hlist_for_each_entry(req, list, hash_node)
9521 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9522 req->task->task_works != NULL);
9523 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06009524 spin_unlock(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009525 if (has_lock)
9526 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009527}
9528
9529static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9530{
9531 struct io_ring_ctx *ctx = f->private_data;
9532
9533 if (percpu_ref_tryget(&ctx->refs)) {
9534 __io_uring_show_fdinfo(ctx, m);
9535 percpu_ref_put(&ctx->refs);
9536 }
9537}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009538#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009539
Jens Axboe2b188cc2019-01-07 10:46:33 -07009540static const struct file_operations io_uring_fops = {
9541 .release = io_uring_release,
9542 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009543#ifndef CONFIG_MMU
9544 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9545 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9546#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009547 .poll = io_uring_poll,
9548 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009549#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009550 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009551#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009552};
9553
9554static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9555 struct io_uring_params *p)
9556{
Hristo Venev75b28af2019-08-26 17:23:46 +00009557 struct io_rings *rings;
9558 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009559
Jens Axboebd740482020-08-05 12:58:23 -06009560 /* make sure these are sane, as we already accounted them */
9561 ctx->sq_entries = p->sq_entries;
9562 ctx->cq_entries = p->cq_entries;
9563
Hristo Venev75b28af2019-08-26 17:23:46 +00009564 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9565 if (size == SIZE_MAX)
9566 return -EOVERFLOW;
9567
9568 rings = io_mem_alloc(size);
9569 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009570 return -ENOMEM;
9571
Hristo Venev75b28af2019-08-26 17:23:46 +00009572 ctx->rings = rings;
9573 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9574 rings->sq_ring_mask = p->sq_entries - 1;
9575 rings->cq_ring_mask = p->cq_entries - 1;
9576 rings->sq_ring_entries = p->sq_entries;
9577 rings->cq_ring_entries = p->cq_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009578
9579 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009580 if (size == SIZE_MAX) {
9581 io_mem_free(ctx->rings);
9582 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009583 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009584 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009585
9586 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009587 if (!ctx->sq_sqes) {
9588 io_mem_free(ctx->rings);
9589 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009590 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009591 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009592
Jens Axboe2b188cc2019-01-07 10:46:33 -07009593 return 0;
9594}
9595
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009596static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9597{
9598 int ret, fd;
9599
9600 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9601 if (fd < 0)
9602 return fd;
9603
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009604 ret = io_uring_add_tctx_node(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009605 if (ret) {
9606 put_unused_fd(fd);
9607 return ret;
9608 }
9609 fd_install(fd, file);
9610 return fd;
9611}
9612
Jens Axboe2b188cc2019-01-07 10:46:33 -07009613/*
9614 * Allocate an anonymous fd; this is what constitutes the application-
9615 * visible backing of an io_uring instance. The application mmaps this
9616 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9617 * we have to tie this fd to a socket for file garbage collection purposes.
9618 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009619static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009620{
9621 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009622#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009623 int ret;
9624
Jens Axboe2b188cc2019-01-07 10:46:33 -07009625 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9626 &ctx->ring_sock);
9627 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009628 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009629#endif
9630
Jens Axboe2b188cc2019-01-07 10:46:33 -07009631 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9632 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009633#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009634 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009635 sock_release(ctx->ring_sock);
9636 ctx->ring_sock = NULL;
9637 } else {
9638 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009639 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009640#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009641 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009642}
9643
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009644static int io_uring_create(unsigned entries, struct io_uring_params *p,
9645 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009646{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009647 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009648 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009649 int ret;
9650
Jens Axboe8110c1a2019-12-28 15:39:54 -07009651 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009652 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009653 if (entries > IORING_MAX_ENTRIES) {
9654 if (!(p->flags & IORING_SETUP_CLAMP))
9655 return -EINVAL;
9656 entries = IORING_MAX_ENTRIES;
9657 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009658
9659 /*
9660 * Use twice as many entries for the CQ ring. It's possible for the
9661 * application to drive a higher depth than the size of the SQ ring,
9662 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009663 * some flexibility in overcommitting a bit. If the application has
9664 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9665 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009666 */
9667 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009668 if (p->flags & IORING_SETUP_CQSIZE) {
9669 /*
9670 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9671 * to a power-of-two, if it isn't already. We do NOT impose
9672 * any cq vs sq ring sizing.
9673 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009674 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009675 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009676 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9677 if (!(p->flags & IORING_SETUP_CLAMP))
9678 return -EINVAL;
9679 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9680 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009681 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9682 if (p->cq_entries < p->sq_entries)
9683 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009684 } else {
9685 p->cq_entries = 2 * p->sq_entries;
9686 }
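	/*
	 * Worked example (illustrative): entries == 100 without
	 * IORING_SETUP_CQSIZE ends up here with sq_entries == 128 and
	 * cq_entries == 256.
	 */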
Jens Axboe2b188cc2019-01-07 10:46:33 -07009687
Jens Axboe2b188cc2019-01-07 10:46:33 -07009688 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009689 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009690 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009691 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009692 if (!capable(CAP_IPC_LOCK))
9693 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009694
9695 /*
9696 * This is just grabbed for accounting purposes. When a process exits,
9697 * the mm is exited and dropped before the files, hence we need to hang
9698 * on to this mm purely for the purposes of being able to unaccount
9699 * memory (locked/pinned vm). It's not used for anything else.
9700 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009701 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009702 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009703
Jens Axboe2b188cc2019-01-07 10:46:33 -07009704 ret = io_allocate_scq_urings(ctx, p);
9705 if (ret)
9706 goto err;
9707
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009708 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009709 if (ret)
9710 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009711 /* always set a rsrc node */
Pavel Begunkov47b228c2021-04-29 11:46:48 +01009712 ret = io_rsrc_node_switch_start(ctx);
9713 if (ret)
9714 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009715 io_rsrc_node_switch(ctx, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009716
Jens Axboe2b188cc2019-01-07 10:46:33 -07009717 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009718 p->sq_off.head = offsetof(struct io_rings, sq.head);
9719 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9720 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9721 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9722 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9723 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9724 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009725
9726 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009727 p->cq_off.head = offsetof(struct io_rings, cq.head);
9728 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9729 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9730 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9731 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9732 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009733 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009734
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009735 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9736 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009737 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009738 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Pavel Begunkov96905572021-06-10 16:37:38 +01009739 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
9740 IORING_FEAT_RSRC_TAGS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009741
9742 if (copy_to_user(params, p, sizeof(*p))) {
9743 ret = -EFAULT;
9744 goto err;
9745 }
Jens Axboed1719f72020-07-30 13:43:53 -06009746
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009747 file = io_uring_get_file(ctx);
9748 if (IS_ERR(file)) {
9749 ret = PTR_ERR(file);
9750 goto err;
9751 }
9752
Jens Axboed1719f72020-07-30 13:43:53 -06009753 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009754 * Install ring fd as the very last thing, so we don't risk someone
9755 * having closed it before we finish setup
9756 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009757 ret = io_uring_install_fd(ctx, file);
9758 if (ret < 0) {
9759 /* fput will clean it up */
9760 fput(file);
9761 return ret;
9762 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009763
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009764 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009765 return ret;
9766err:
9767 io_ring_ctx_wait_and_kill(ctx);
9768 return ret;
9769}
9770
9771/*
9772 * Sets up an io_uring context and returns the fd. The application asks for a
9773 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9774 * params structure passed in.
9775 */
9776static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9777{
9778 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009779 int i;
9780
9781 if (copy_from_user(&p, params, sizeof(p)))
9782 return -EFAULT;
9783 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9784 if (p.resv[i])
9785 return -EINVAL;
9786 }
9787
Jens Axboe6c271ce2019-01-10 11:22:30 -07009788 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009789 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009790 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9791 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009792 return -EINVAL;
9793
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009794 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009795}
9796
9797SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9798 struct io_uring_params __user *, params)
9799{
9800 return io_uring_setup(entries, params);
9801}
9802
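/*
 * Illustrative userspace sketch (not part of the kernel sources): a minimal
 * setup call, and how the returned offsets locate the SQ head/tail/array in
 * the ring mapping established via io_uring_mmap() above:
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 *	unsigned *sq_head  = (unsigned *)((char *)sq_ring + p.sq_off.head);
 *	unsigned *sq_tail  = (unsigned *)((char *)sq_ring + p.sq_off.tail);
 *	unsigned *sq_array = (unsigned *)((char *)sq_ring + p.sq_off.array);
 */
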
Jens Axboe66f4af92020-01-16 15:36:52 -07009803static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9804{
9805 struct io_uring_probe *p;
9806 size_t size;
9807 int i, ret;
9808
9809 size = struct_size(p, ops, nr_args);
9810 if (size == SIZE_MAX)
9811 return -EOVERFLOW;
9812 p = kzalloc(size, GFP_KERNEL);
9813 if (!p)
9814 return -ENOMEM;
9815
9816 ret = -EFAULT;
9817 if (copy_from_user(p, arg, size))
9818 goto out;
9819 ret = -EINVAL;
9820 if (memchr_inv(p, 0, size))
9821 goto out;
9822
9823 p->last_op = IORING_OP_LAST - 1;
9824 if (nr_args > IORING_OP_LAST)
9825 nr_args = IORING_OP_LAST;
9826
9827 for (i = 0; i < nr_args; i++) {
9828 p->ops[i].op = i;
9829 if (!io_op_defs[i].not_supported)
9830 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9831 }
9832 p->ops_len = i;
9833
9834 ret = 0;
9835 if (copy_to_user(arg, p, size))
9836 ret = -EFAULT;
9837out:
9838 kfree(p);
9839 return ret;
9840}
9841
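/*
 * Illustrative userspace sketch (not part of the kernel sources) of probing
 * supported opcodes through the register syscall handled further below:
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *
 * On return, probe->ops[op].flags & IO_URING_OP_SUPPORTED tells whether a
 * given opcode is implemented by the running kernel.
 */
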
Jens Axboe071698e2020-01-28 10:04:42 -07009842static int io_register_personality(struct io_ring_ctx *ctx)
9843{
Jens Axboe4379bf82021-02-15 13:40:22 -07009844 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009845 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009846 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009847
Jens Axboe4379bf82021-02-15 13:40:22 -07009848 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009849
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009850 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9851 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
Jens Axboea30f8952021-08-20 14:53:59 -06009852 if (ret < 0) {
9853 put_cred(creds);
9854 return ret;
9855 }
9856 return id;
Jens Axboe071698e2020-01-28 10:04:42 -07009857}
9858
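/*
 * Illustrative usage (not part of the kernel sources): the id returned by
 * IORING_REGISTER_PERSONALITY is later placed in sqe->personality so that
 * an individual request runs with the registered credentials:
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	sqe->personality = id;
 */
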
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009859static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9860 unsigned int nr_args)
9861{
9862 struct io_uring_restriction *res;
9863 size_t size;
9864 int i, ret;
9865
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009866 /* Restrictions allowed only if rings started disabled */
9867 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9868 return -EBADFD;
9869
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009870 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009871 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009872 return -EBUSY;
9873
9874 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9875 return -EINVAL;
9876
9877 size = array_size(nr_args, sizeof(*res));
9878 if (size == SIZE_MAX)
9879 return -EOVERFLOW;
9880
9881 res = memdup_user(arg, size);
9882 if (IS_ERR(res))
9883 return PTR_ERR(res);
9884
9885 ret = 0;
9886
9887 for (i = 0; i < nr_args; i++) {
9888 switch (res[i].opcode) {
9889 case IORING_RESTRICTION_REGISTER_OP:
9890 if (res[i].register_op >= IORING_REGISTER_LAST) {
9891 ret = -EINVAL;
9892 goto out;
9893 }
9894
9895 __set_bit(res[i].register_op,
9896 ctx->restrictions.register_op);
9897 break;
9898 case IORING_RESTRICTION_SQE_OP:
9899 if (res[i].sqe_op >= IORING_OP_LAST) {
9900 ret = -EINVAL;
9901 goto out;
9902 }
9903
9904 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9905 break;
9906 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9907 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9908 break;
9909 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9910 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9911 break;
9912 default:
9913 ret = -EINVAL;
9914 goto out;
9915 }
9916 }
9917
9918out:
9919 /* Reset all restrictions if an error happened */
9920 if (ret != 0)
9921 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9922 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009923 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009924
9925 kfree(res);
9926 return ret;
9927}
9928
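/*
 * Illustrative userspace sketch (not part of the kernel sources): a ring
 * created with IORING_SETUP_R_DISABLED can be restricted before it is
 * enabled, e.g. permitting only buffer registration and readv requests:
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
 *		  .register_op = IORING_REGISTER_BUFFERS },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_RESTRICTIONS,
 *		res, 2);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_ENABLE_RINGS,
 *		NULL, 0);
 */
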
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009929static int io_register_enable_rings(struct io_ring_ctx *ctx)
9930{
9931 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9932 return -EBADFD;
9933
9934 if (ctx->restrictions.registered)
9935 ctx->restricted = 1;
9936
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009937 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9938 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9939 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009940 return 0;
9941}
9942
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009943static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009944 struct io_uring_rsrc_update2 *up,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009945 unsigned nr_args)
9946{
9947 __u32 tmp;
9948 int err;
9949
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009950 if (up->resv)
9951 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009952 if (check_add_overflow(up->offset, nr_args, &tmp))
9953 return -EOVERFLOW;
9954 err = io_rsrc_node_switch_start(ctx);
9955 if (err)
9956 return err;
9957
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009958 switch (type) {
9959 case IORING_RSRC_FILE:
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009960 return __io_sqe_files_update(ctx, up, nr_args);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009961 case IORING_RSRC_BUFFER:
9962 return __io_sqe_buffers_update(ctx, up, nr_args);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009963 }
9964 return -EINVAL;
9965}
9966
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009967static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
9968 unsigned nr_args)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009969{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009970 struct io_uring_rsrc_update2 up;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009971
9972 if (!nr_args)
9973 return -EINVAL;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009974 memset(&up, 0, sizeof(up));
9975 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
9976 return -EFAULT;
9977 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
9978}
9979
9980static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009981 unsigned size, unsigned type)
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009982{
9983 struct io_uring_rsrc_update2 up;
9984
9985 if (size != sizeof(up))
9986 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009987 if (copy_from_user(&up, arg, sizeof(up)))
9988 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +01009989 if (!up.nr || up.resv)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009990 return -EINVAL;
Pavel Begunkov992da012021-06-10 16:37:37 +01009991 return __io_register_rsrc_update(ctx, type, &up, up.nr);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009992}
9993
Pavel Begunkov792e3582021-04-25 14:32:21 +01009994static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009995 unsigned int size, unsigned int type)
Pavel Begunkov792e3582021-04-25 14:32:21 +01009996{
9997 struct io_uring_rsrc_register rr;
9998
9999 /* keep it extendible */
10000 if (size != sizeof(rr))
10001 return -EINVAL;
10002
10003 memset(&rr, 0, sizeof(rr));
10004 if (copy_from_user(&rr, arg, size))
10005 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +010010006 if (!rr.nr || rr.resv || rr.resv2)
Pavel Begunkov792e3582021-04-25 14:32:21 +010010007 return -EINVAL;
10008
Pavel Begunkov992da012021-06-10 16:37:37 +010010009 switch (type) {
Pavel Begunkov792e3582021-04-25 14:32:21 +010010010 case IORING_RSRC_FILE:
10011 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10012 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010013 case IORING_RSRC_BUFFER:
10014 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
10015 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov792e3582021-04-25 14:32:21 +010010016 }
10017 return -EINVAL;
10018}
10019
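/*
 * Illustrative userspace sketch (not part of the kernel sources) of the
 * tagged file registration handled above via IORING_REGISTER_FILES2:
 *
 *	int fds[2] = { fd0, fd1 };
 *	__u64 tags[2] = { 1, 2 };
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 2,
 *		.data	= (__u64)(unsigned long)fds,
 *		.tags	= (__u64)(unsigned long)tags,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 *
 * Note that nr_args carries sizeof(rr) here, not the number of files.
 */
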
Jens Axboefe764212021-06-17 10:19:54 -060010020static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
10021 unsigned len)
10022{
10023 struct io_uring_task *tctx = current->io_uring;
10024 cpumask_var_t new_mask;
10025 int ret;
10026
10027 if (!tctx || !tctx->io_wq)
10028 return -EINVAL;
10029
10030 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
10031 return -ENOMEM;
10032
10033 cpumask_clear(new_mask);
10034 if (len > cpumask_size())
10035 len = cpumask_size();
10036
10037 if (copy_from_user(new_mask, arg, len)) {
10038 free_cpumask_var(new_mask);
10039 return -EFAULT;
10040 }
10041
10042 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10043 free_cpumask_var(new_mask);
10044 return ret;
10045}
10046
10047static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
10048{
10049 struct io_uring_task *tctx = current->io_uring;
10050
10051 if (!tctx || !tctx->io_wq)
10052 return -EINVAL;
10053
10054 return io_wq_cpu_affinity(tctx->io_wq, NULL);
10055}
10056
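/*
 * Illustrative userspace sketch (not part of the kernel sources): pinning
 * this task's io-wq workers to CPUs 0-1 and later clearing the restriction.
 * nr_args carries the mask length in bytes for the register case:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&set, sizeof(set));
 *	...
 *	syscall(__NR_io_uring_register, ring_fd, IORING_UNREGISTER_IOWQ_AFF,
 *		NULL, 0);
 */
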
Jens Axboe071698e2020-01-28 10:04:42 -070010057static bool io_register_op_must_quiesce(int op)
10058{
10059 switch (op) {
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +010010060 case IORING_REGISTER_BUFFERS:
10061 case IORING_UNREGISTER_BUFFERS:
Pavel Begunkovf4f7d212021-04-01 15:44:02 +010010062 case IORING_REGISTER_FILES:
Jens Axboe071698e2020-01-28 10:04:42 -070010063 case IORING_UNREGISTER_FILES:
10064 case IORING_REGISTER_FILES_UPDATE:
10065 case IORING_REGISTER_PROBE:
10066 case IORING_REGISTER_PERSONALITY:
10067 case IORING_UNREGISTER_PERSONALITY:
Pavel Begunkov992da012021-06-10 16:37:37 +010010068 case IORING_REGISTER_FILES2:
10069 case IORING_REGISTER_FILES_UPDATE2:
10070 case IORING_REGISTER_BUFFERS2:
10071 case IORING_REGISTER_BUFFERS_UPDATE:
Jens Axboefe764212021-06-17 10:19:54 -060010072 case IORING_REGISTER_IOWQ_AFF:
10073 case IORING_UNREGISTER_IOWQ_AFF:
Jens Axboe071698e2020-01-28 10:04:42 -070010074 return false;
10075 default:
10076 return true;
10077 }
10078}
10079
Pavel Begunkove73c5c72021-08-09 13:04:12 +010010080static int io_ctx_quiesce(struct io_ring_ctx *ctx)
10081{
10082 long ret;
10083
10084 percpu_ref_kill(&ctx->refs);
10085
10086 /*
10087 * Drop uring mutex before waiting for references to exit. If another
10088 * thread is currently inside io_uring_enter() it might need to grab the
10089 * uring_lock to make progress. If we hold it here across the drain
10090 * wait, then we can deadlock. It's safe to drop the mutex here, since
10091 * no new references will come in after we've killed the percpu ref.
10092 */
10093 mutex_unlock(&ctx->uring_lock);
10094 do {
10095 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10096 if (!ret)
10097 break;
10098 ret = io_run_task_work_sig();
10099 } while (ret >= 0);
10100 mutex_lock(&ctx->uring_lock);
10101
10102 if (ret)
10103 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10104 return ret;
10105}
10106
Jens Axboeedafcce2019-01-09 09:16:05 -070010107static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10108 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -060010109 __releases(ctx->uring_lock)
10110 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -070010111{
10112 int ret;
10113
Jens Axboe35fa71a2019-04-22 10:23:23 -060010114 /*
10115	 * We're inside the ring mutex; if the ref is already dying, then
10116 * someone else killed the ctx or is already going through
10117 * io_uring_register().
10118 */
10119 if (percpu_ref_is_dying(&ctx->refs))
10120 return -ENXIO;
10121
Pavel Begunkov75c40212021-04-15 13:07:40 +010010122 if (ctx->restricted) {
10123 if (opcode >= IORING_REGISTER_LAST)
10124 return -EINVAL;
10125 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10126 if (!test_bit(opcode, ctx->restrictions.register_op))
10127 return -EACCES;
10128 }
10129
Jens Axboe071698e2020-01-28 10:04:42 -070010130 if (io_register_op_must_quiesce(opcode)) {
Pavel Begunkove73c5c72021-08-09 13:04:12 +010010131 ret = io_ctx_quiesce(ctx);
10132 if (ret)
Pavel Begunkovf70865d2021-04-11 01:46:40 +010010133 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -070010134 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010135
10136 switch (opcode) {
10137 case IORING_REGISTER_BUFFERS:
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010138 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -070010139 break;
10140 case IORING_UNREGISTER_BUFFERS:
10141 ret = -EINVAL;
10142 if (arg || nr_args)
10143 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010144 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -070010145 break;
Jens Axboe6b063142019-01-10 22:13:58 -070010146 case IORING_REGISTER_FILES:
Pavel Begunkov792e3582021-04-25 14:32:21 +010010147 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
Jens Axboe6b063142019-01-10 22:13:58 -070010148 break;
10149 case IORING_UNREGISTER_FILES:
10150 ret = -EINVAL;
10151 if (arg || nr_args)
10152 break;
10153 ret = io_sqe_files_unregister(ctx);
10154 break;
Jens Axboec3a31e62019-10-03 13:59:56 -060010155 case IORING_REGISTER_FILES_UPDATE:
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010156 ret = io_register_files_update(ctx, arg, nr_args);
Jens Axboec3a31e62019-10-03 13:59:56 -060010157 break;
Jens Axboe9b402842019-04-11 11:45:41 -060010158 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -070010159 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -060010160 ret = -EINVAL;
10161 if (nr_args != 1)
10162 break;
10163 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -070010164 if (ret)
10165 break;
10166 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10167 ctx->eventfd_async = 1;
10168 else
10169 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -060010170 break;
10171 case IORING_UNREGISTER_EVENTFD:
10172 ret = -EINVAL;
10173 if (arg || nr_args)
10174 break;
10175 ret = io_eventfd_unregister(ctx);
10176 break;
Jens Axboe66f4af92020-01-16 15:36:52 -070010177 case IORING_REGISTER_PROBE:
10178 ret = -EINVAL;
10179 if (!arg || nr_args > 256)
10180 break;
10181 ret = io_probe(ctx, arg, nr_args);
10182 break;
Jens Axboe071698e2020-01-28 10:04:42 -070010183 case IORING_REGISTER_PERSONALITY:
10184 ret = -EINVAL;
10185 if (arg || nr_args)
10186 break;
10187 ret = io_register_personality(ctx);
10188 break;
10189 case IORING_UNREGISTER_PERSONALITY:
10190 ret = -EINVAL;
10191 if (arg)
10192 break;
10193 ret = io_unregister_personality(ctx, nr_args);
10194 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010195 case IORING_REGISTER_ENABLE_RINGS:
10196 ret = -EINVAL;
10197 if (arg || nr_args)
10198 break;
10199 ret = io_register_enable_rings(ctx);
10200 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010201 case IORING_REGISTER_RESTRICTIONS:
10202 ret = io_register_restrictions(ctx, arg, nr_args);
10203 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010204 case IORING_REGISTER_FILES2:
10205 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
Pavel Begunkov792e3582021-04-25 14:32:21 +010010206 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010207 case IORING_REGISTER_FILES_UPDATE2:
10208 ret = io_register_rsrc_update(ctx, arg, nr_args,
10209 IORING_RSRC_FILE);
10210 break;
10211 case IORING_REGISTER_BUFFERS2:
10212 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
10213 break;
10214 case IORING_REGISTER_BUFFERS_UPDATE:
10215 ret = io_register_rsrc_update(ctx, arg, nr_args,
10216 IORING_RSRC_BUFFER);
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010217 break;
Jens Axboefe764212021-06-17 10:19:54 -060010218 case IORING_REGISTER_IOWQ_AFF:
10219 ret = -EINVAL;
10220 if (!arg || !nr_args)
10221 break;
10222 ret = io_register_iowq_aff(ctx, arg, nr_args);
10223 break;
10224 case IORING_UNREGISTER_IOWQ_AFF:
10225 ret = -EINVAL;
10226 if (arg || nr_args)
10227 break;
10228 ret = io_unregister_iowq_aff(ctx);
10229 break;
Jens Axboeedafcce2019-01-09 09:16:05 -070010230 default:
10231 ret = -EINVAL;
10232 break;
10233 }
10234
Jens Axboe071698e2020-01-28 10:04:42 -070010235 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010236 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -070010237 percpu_ref_reinit(&ctx->refs);
Jens Axboe0f158b42020-05-14 17:18:39 -060010238 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010239 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010240 return ret;
10241}
10242
10243SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10244 void __user *, arg, unsigned int, nr_args)
10245{
10246 struct io_ring_ctx *ctx;
10247 long ret = -EBADF;
10248 struct fd f;
10249
10250 f = fdget(fd);
10251 if (!f.file)
10252 return -EBADF;
10253
10254 ret = -EOPNOTSUPP;
10255 if (f.file->f_op != &io_uring_fops)
10256 goto out_fput;
10257
10258 ctx = f.file->private_data;
10259
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +000010260 io_run_task_work();
10261
Jens Axboeedafcce2019-01-09 09:16:05 -070010262 mutex_lock(&ctx->uring_lock);
10263 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10264 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010265 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10266 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010267out_fput:
10268 fdput(f);
10269 return ret;
10270}
10271
Jens Axboe2b188cc2019-01-07 10:46:33 -070010272static int __init io_uring_init(void)
10273{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010274#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10275 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10276 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10277} while (0)
10278
10279#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10280 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10281 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10282 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10283 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10284 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10285 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10286 BUILD_BUG_SQE_ELEM(8, __u64, off);
10287 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10288 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010289 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010290 BUILD_BUG_SQE_ELEM(24, __u32, len);
10291 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10292 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10293 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10294 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010295 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10296 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010297 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10298 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10299 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10300 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10301 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10302 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10303 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10304 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010305 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010306 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10307 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010308 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010309 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010310 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010311
Pavel Begunkovb0d658ec2021-04-27 16:13:53 +010010312 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10313 sizeof(struct io_uring_rsrc_update));
10314 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10315 sizeof(struct io_uring_rsrc_update2));
10316 /* should fit into one byte */
10317 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10318
Jens Axboed3656342019-12-18 09:50:26 -070010319 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -070010320 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010321
Jens Axboe91f245d2021-02-09 13:48:50 -070010322 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10323 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010324 return 0;
10325};
10326__initcall(io_uring_init);