// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs an smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between the two.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
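
/*
 * As an illustrative, userspace-side sketch of the CQ rules above (not
 * kernel code; liburing provides the real helpers, and handle_cqe() is
 * a placeholder): an acquire load of the tail plus a release store of
 * the head is enough to reap completions safely:
 *
 *	unsigned head = *cq_khead;
 *	unsigned tail = smp_load_acquire(cq_ktail);  // pairs with the kernel's tail store
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_ring_mask];
 *		handle_cqe(cqe);                     // entry loads ordered after the tail load
 *		head++;
 *	}
 *	smp_store_release(cq_khead, head);           // orders entry loads before the head store
 */
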
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)
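/*
 * Note: the three defines above describe a two-level table of u64 tags,
 * sized so that each leaf array fills one page (PAGE_SHIFT - 3 because
 * a u64 is 1 << 3 bytes). As a sketch (not a definitive helper), slot i
 * lives at:
 *
 *	tags[i >> IO_RSRC_TAG_TABLE_SHIFT][i & IO_RSRC_TAG_TABLE_MASK]
 */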

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_IO_LINK | \
			 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
			 IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to a cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to a cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
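
/*
 * A minimal sketch of how the masks above are meant to be used, on
 * either side of the ring (names abbreviated from the fields above):
 *
 *	unsigned index = tail & ring_mask;	// ring_entries is a power of 2,
 *						// so this wraps into [0, ring_entries)
 *	struct io_uring_cqe *cqe = &rings->cqes[index];
 *
 * head and tail are free-running 32-bit counters and only masked values
 * index the arrays, so a full ring is simply tail - head == ring_entries.
 */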

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
	unsigned int		compl_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;

	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the
		 * entries array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		struct fasync_struct	*cq_fasync;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		spinlock_t		timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction	restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
	#if defined(CONFIG_UNIX)
		struct socket		*ring_sock;
	#endif
		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		/* Only used for accounting purposes */
		struct user_struct	*user;
		struct mm_struct	*mm_account;

		/* ctx exit and cancelation */
		struct llist_head	fallback_llist;
		struct delayed_work	fallback_work;
		struct work_struct	exit_work;
		struct list_head	tctx_list;
		struct completion	ref_comp;
	};
};

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	struct callback_head	task_work;
	bool			task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
	u32				flags;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_mkdir {
	struct file			*file;
	int				dfd;
	umode_t				mode;
	struct filename			*filename;
};

struct io_symlink {
	struct file			*file;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
};

struct io_hardlink {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_completion {
	struct file			*file;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	struct iov_iter_state		iter_state;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		struct io_mkdir		mkdir;
		struct io_symlink	symlink;
		struct io_hardlink	hardlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
	[IORING_OP_MKDIRAT] = {},
	[IORING_OP_SYMLINKAT] = {},
	[IORING_OP_LINKAT] = {},
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static int io_req_prep_async(struct io_kiocb *req);

static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
				 unsigned int issue_flags, u32 slot_index);
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
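
/*
 * Worked example of the check above, with the count read as unsigned:
 * an underflow to (unsigned int)-1 yields -1 + 127u == 126u <= 127u and
 * trips the warning, as does a count of 0 (127u <= 127u), while a
 * healthy count of 1 yields 128u and passes. A single comparison thus
 * flags every value in the suspicious window [-127, 0].
 */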

static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
	return atomic_inc_not_zero(&req->refs);
}

static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_REFCOUNT)))
		return true;

	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->refs);
}

static inline void req_ref_put(struct io_kiocb *req)
{
	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
	WARN_ON_ONCE(req_ref_put_and_test(req));
}

static inline void req_ref_get(struct io_kiocb *req)
{
	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	atomic_inc(&req->refs);
}

static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
{
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags |= REQ_F_REFCOUNT;
		atomic_set(&req->refs, nr);
	}
}

static inline void io_req_set_refcount(struct io_kiocb *req)
{
	__io_req_set_refcount(req, 1);
}
1178
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01001179static inline void io_req_set_rsrc_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001180{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001181 struct io_ring_ctx *ctx = req->ctx;
1182
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001183 if (!req->fixed_rsrc_refs) {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01001184 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001185 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001186 }
1187}
1188
Pavel Begunkovf70865d2021-04-11 01:46:40 +01001189static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1190{
1191 bool got = percpu_ref_tryget(ref);
1192
1193 /* already at zero, wait for ->release() */
1194 if (!got)
1195 wait_for_completion(compl);
1196 percpu_ref_resurrect(ref);
1197 if (got)
1198 percpu_ref_put(ref);
1199}
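/*
 * Editor's note (illustrative summary, not upstream text): the tryget above
 * covers both orderings.  If percpu_ref_tryget() succeeds, the ref had not
 * yet hit zero, so the extra reference just taken is dropped again after the
 * resurrect.  If it fails, the ref already reached zero and we must wait for
 * ->release() (signalled via the completion) before percpu_ref_resurrect()
 * is legal to call.
 */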
1200
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001201static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1202 bool cancel_all)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001203{
1204 struct io_kiocb *req;
1205
Pavel Begunkov68207682021-03-22 01:58:25 +00001206 if (task && head->task != task)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001207 return false;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001208 if (cancel_all)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001209 return true;
1210
1211 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001212 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001213 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001214 }
1215 return false;
1216}
1217
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001218static inline void req_set_fail(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001219{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001220 req->flags |= REQ_F_FAIL;
Jens Axboec40f6372020-06-25 15:39:59 -06001221}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001222
Hao Xua8295b92021-08-27 17:46:09 +08001223static inline void req_fail_link_node(struct io_kiocb *req, int res)
1224{
1225 req_set_fail(req);
1226 req->result = res;
1227}
1228
Jens Axboe2b188cc2019-01-07 10:46:33 -07001229static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1230{
1231 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1232
Jens Axboe0f158b42020-05-14 17:18:39 -06001233 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001234}
1235
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001236static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1237{
1238 return !req->timeout.off;
1239}
1240
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001241static void io_fallback_req_func(struct work_struct *work)
1242{
1243 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1244 fallback_work.work);
1245 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1246 struct io_kiocb *req, *tmp;
Pavel Begunkovf237c302021-08-18 12:42:46 +01001247 bool locked = false;
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001248
1249 percpu_ref_get(&ctx->refs);
1250 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
Pavel Begunkovf237c302021-08-18 12:42:46 +01001251 req->io_task_work.func(req, &locked);
Pavel Begunkov5636c002021-08-18 12:42:45 +01001252
Pavel Begunkovf237c302021-08-18 12:42:46 +01001253 if (locked) {
1254 if (ctx->submit_state.compl_nr)
1255 io_submit_flush_completions(ctx);
1256 mutex_unlock(&ctx->uring_lock);
1257 }
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001258 percpu_ref_put(&ctx->refs);
Pavel Begunkovf237c302021-08-18 12:42:46 +01001259
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001260}
1261
Jens Axboe2b188cc2019-01-07 10:46:33 -07001262static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1263{
1264 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001265 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001266
1267 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1268 if (!ctx)
1269 return NULL;
1270
Jens Axboe78076bb2019-12-04 19:56:40 -07001271 /*
1272	 * Use 5 bits less than the max cq entries; that should give us around
1273 * 32 entries per hash list if totally full and uniformly spread.
1274 */
1275 hash_bits = ilog2(p->cq_entries);
1276 hash_bits -= 5;
1277 if (hash_bits <= 0)
1278 hash_bits = 1;
1279 ctx->cancel_hash_bits = hash_bits;
1280 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1281 GFP_KERNEL);
1282 if (!ctx->cancel_hash)
1283 goto err;
1284 __hash_init(ctx->cancel_hash, 1U << hash_bits);
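	/*
	 * Editor's worked example (illustrative only): with p->cq_entries ==
	 * 4096, ilog2(4096) == 12, so hash_bits == 7 and the table holds
	 * 1 << 7 == 128 buckets; a completely full, uniformly spread CQ then
	 * averages 4096 / 128 == 32 entries per hash list, as per the comment
	 * above.
	 */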
1285
Pavel Begunkov62248432021-04-28 13:11:29 +01001286 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1287 if (!ctx->dummy_ubuf)
1288 goto err;
1289	/* set an invalid range, so io_import_fixed() fails when it meets it */
1290 ctx->dummy_ubuf->ubuf = -1UL;
1291
Roman Gushchin21482892019-05-07 10:01:48 -07001292 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001293 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1294 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001295
1296 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001297 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001298 INIT_LIST_HEAD(&ctx->sqd_list);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001299 init_waitqueue_head(&ctx->poll_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001300 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001301 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001302 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001303 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001304 mutex_init(&ctx->uring_lock);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001305 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001306 spin_lock_init(&ctx->completion_lock);
Jens Axboe89850fc2021-08-10 15:11:51 -06001307 spin_lock_init(&ctx->timeout_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001308 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001309 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001310 INIT_LIST_HEAD(&ctx->timeout_list);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06001311 INIT_LIST_HEAD(&ctx->ltimeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001312 spin_lock_init(&ctx->rsrc_ref_lock);
1313 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001314 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1315 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001316 INIT_LIST_HEAD(&ctx->tctx_list);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001317 INIT_LIST_HEAD(&ctx->submit_state.free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001318 INIT_LIST_HEAD(&ctx->locked_free_list);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01001319 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001320 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001321err:
Pavel Begunkov62248432021-04-28 13:11:29 +01001322 kfree(ctx->dummy_ubuf);
Jens Axboe78076bb2019-12-04 19:56:40 -07001323 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001324 kfree(ctx);
1325 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001326}
1327
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001328static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1329{
1330 struct io_rings *r = ctx->rings;
1331
1332 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1333 ctx->cq_extra--;
1334}
1335
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001336static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001337{
Jens Axboe2bc99302020-07-09 09:43:27 -06001338 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1339 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001340
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001341 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001342 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001343
Bob Liu9d858b22019-11-13 18:06:25 +08001344 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001345}
1346
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01001347#define FFS_ASYNC_READ 0x1UL
1348#define FFS_ASYNC_WRITE 0x2UL
1349#ifdef CONFIG_64BIT
1350#define FFS_ISREG 0x4UL
1351#else
1352#define FFS_ISREG 0x0UL
1353#endif
1354#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
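/*
 * Editor's sketch (hypothetical helpers, not part of io_uring): struct file
 * pointers are aligned, so the low bits cleared by FFS_MASK are free to
 * cache per-file properties next to the pointer in the fixed file table.  A
 * pack/unpack pair built on the flags above might look like this:
 */
static inline unsigned long ffs_pack_example(struct file *file,
					     unsigned long ffs_flags)
{
	/* ffs_flags may only contain bits outside FFS_MASK */
	return (unsigned long)file | ffs_flags;
}

static inline struct file *ffs_unpack_example(unsigned long file_ptr)
{
	/* clearing the flag bits recovers the original pointer */
	return (struct file *)(file_ptr & FFS_MASK);
}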
1355
1356static inline bool io_req_ffs_set(struct io_kiocb *req)
1357{
1358 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1359}
1360
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001361static void io_req_track_inflight(struct io_kiocb *req)
1362{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001363 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001364 req->flags |= REQ_F_INFLIGHT;
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001365 atomic_inc(&current->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001366 }
1367}
1368
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01001369static inline void io_unprep_linked_timeout(struct io_kiocb *req)
1370{
1371 req->flags &= ~REQ_F_LINK_TIMEOUT;
1372}
1373
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001374static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1375{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01001376 if (WARN_ON_ONCE(!req->link))
1377 return NULL;
1378
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001379 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1380 req->flags |= REQ_F_LINK_TIMEOUT;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001381
1382 /* linked timeouts should have two refs once prep'ed */
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001383 io_req_set_refcount(req);
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001384 __io_req_set_refcount(req->link, 2);
1385 return req->link;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001386}
1387
1388static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1389{
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001390 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001391 return NULL;
1392 return __io_prep_linked_timeout(req);
1393}
1394
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001395static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001396{
Jens Axboed3656342019-12-18 09:50:26 -07001397 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001398 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001399
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001400 if (!(req->flags & REQ_F_CREDS)) {
1401 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001402 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001403 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001404
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001405 req->work.list.next = NULL;
1406 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001407 if (req->flags & REQ_F_FORCE_ASYNC)
1408 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1409
Jens Axboed3656342019-12-18 09:50:26 -07001410 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001411 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001412 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001413 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001414 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001415 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001416 }
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001417
1418 switch (req->opcode) {
1419 case IORING_OP_SPLICE:
1420 case IORING_OP_TEE:
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001421 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1422 req->work.flags |= IO_WQ_WORK_UNBOUND;
1423 break;
1424 }
Jens Axboe561fb042019-10-24 07:25:42 -06001425}
1426
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001427static void io_prep_async_link(struct io_kiocb *req)
1428{
1429 struct io_kiocb *cur;
1430
Pavel Begunkov44eff402021-07-26 14:14:31 +01001431 if (req->flags & REQ_F_LINK_TIMEOUT) {
1432 struct io_ring_ctx *ctx = req->ctx;
1433
Jens Axboe79ebeae2021-08-10 15:18:27 -06001434 spin_lock(&ctx->completion_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001435 io_for_each_link(cur, req)
1436 io_prep_async_work(cur);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001437 spin_unlock(&ctx->completion_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001438 } else {
1439 io_for_each_link(cur, req)
1440 io_prep_async_work(cur);
1441 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001442}
1443
Pavel Begunkovf237c302021-08-18 12:42:46 +01001444static void io_queue_async_work(struct io_kiocb *req, bool *locked)
Jens Axboe561fb042019-10-24 07:25:42 -06001445{
Jackie Liua197f662019-11-08 08:09:12 -07001446 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001447 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001448 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001449
Pavel Begunkovf237c302021-08-18 12:42:46 +01001450 /* must not take the lock, NULL it as a precaution */
1451 locked = NULL;
1452
Jens Axboe3bfe6102021-02-16 14:15:30 -07001453 BUG_ON(!tctx);
1454 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001455
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001456 /* init ->work of the whole link before punting */
1457 io_prep_async_link(req);
Jens Axboe991468d2021-07-23 11:53:54 -06001458
1459 /*
1460 * Not expected to happen, but if we do have a bug where this _can_
1461 * happen, catch it here and ensure the request is marked as
1462 * canceled. That will make io-wq go through the usual work cancel
1463 * procedure rather than attempt to run this request (or create a new
1464 * worker for it).
1465 */
1466 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1467 req->work.flags |= IO_WQ_WORK_CANCEL;
1468
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001469 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1470 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001471 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001472 if (link)
1473 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001474}
1475
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001476static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001477 __must_hold(&req->ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06001478 __must_hold(&req->ctx->timeout_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001479{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001480 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001481
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001482 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov2ae2eb92021-09-09 13:56:27 +01001483 if (status)
1484 req_set_fail(req);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001485 atomic_set(&req->ctx->cq_timeouts,
1486 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001487 list_del_init(&req->timeout.list);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001488 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001489 io_put_req_deferred(req);
Jens Axboe5262f562019-09-17 12:26:57 -06001490 }
1491}
1492
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001493static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001494{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001495 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001496 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1497 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001498
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001499 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001500 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001501 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001502 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001503 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001504 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001505}
1506
Pavel Begunkov360428f2020-05-30 14:54:17 +03001507static void io_flush_timeouts(struct io_ring_ctx *ctx)
Jens Axboe89850fc2021-08-10 15:11:51 -06001508 __must_hold(&ctx->completion_lock)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001509{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001510 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001511
Jens Axboe79ebeae2021-08-10 15:18:27 -06001512 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001513 while (!list_empty(&ctx->timeout_list)) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001514 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001515 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001516 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001517
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001518 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001519 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001520
1521 /*
1522 * Since seq can easily wrap around over time, subtract
1523 * the last seq at which timeouts were flushed before comparing.
1524 * Assuming not more than 2^31-1 events have happened since,
1525 * these subtractions won't have wrapped, so we can check if
1526 * target is in [last_seq, current_seq] by comparing the two.
1527 */
1528 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1529 events_got = seq - ctx->cq_last_tm_flush;
1530 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001531 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001532
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001533 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001534 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001535 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001536 ctx->cq_last_tm_flush = seq;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001537 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001538}
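/*
 * Editor's worked example for the wrap-around check above (illustrative
 * only, 32-bit arithmetic): suppose cq_last_tm_flush == 0xfffffff0, the
 * current seq == 0xfffffff8, and a timeout targets target_seq == 0x4 (the
 * target has already wrapped).  A naive "seq >= target_seq" test would fire
 * the timeout immediately, but:
 *
 *   events_needed = 0x4        - 0xfffffff0 == 0x14
 *   events_got    = 0xfffffff8 - 0xfffffff0 == 0x08
 *
 * and 0x08 < 0x14, so the timeout correctly keeps waiting for the remaining
 * 12 completions.
 */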
1539
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001540static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001541{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001542 if (ctx->off_timeout_used)
1543 io_flush_timeouts(ctx);
1544 if (ctx->drain_active)
1545 io_queue_deferred(ctx);
1546}
1547
1548static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1549{
1550 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1551 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001552 /* order cqe stores with ring update */
1553 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001554}
1555
Jens Axboe90554202020-09-03 12:12:41 -06001556static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1557{
1558 struct io_rings *r = ctx->rings;
1559
Pavel Begunkova566c552021-05-16 22:58:08 +01001560 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001561}
1562
Pavel Begunkov888aae22021-01-19 13:32:39 +00001563static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1564{
1565 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1566}
1567
Pavel Begunkovd068b502021-05-16 22:58:11 +01001568static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001569{
Hristo Venev75b28af2019-08-26 17:23:46 +00001570 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001571 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001572
Stefan Bühler115e12e2019-04-24 23:54:18 +02001573 /*
1574 * writes to the cq entry need to come after reading head; the
1575 * control dependency is enough as we're using WRITE_ONCE to
1576 * fill the cq entry
1577 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001578 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001579 return NULL;
1580
Pavel Begunkov888aae22021-01-19 13:32:39 +00001581 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001582 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001583}
1584
Jens Axboef2842ab2020-01-08 11:04:00 -07001585static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1586{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001587 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001588 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001589 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1590 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001591 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001592}
1593
Jens Axboe2c5d7632021-08-21 07:21:19 -06001594/*
1595 * This should only get called when at least one event has been posted.
1596 * Some applications rely on the eventfd notification count only changing
1597 * IFF a new CQE has been added to the CQ ring. There's no dependency on
1598 * a 1:1 relationship between how many times this function is called (and
1599 * hence the eventfd count) and the number of CQEs posted to the CQ ring.
1600 */
Jens Axboeb41e9852020-02-17 09:52:41 -07001601static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001602{
Jens Axboe5fd46172021-08-06 14:04:31 -06001603 /*
1604 * wake_up_all() may seem excessive, but io_wake_function() and
1605 * io_should_wake() handle the termination of the loop and only
1606 * wake as many waiters as we need to.
1607 */
1608 if (wq_has_sleeper(&ctx->cq_wait))
1609 wake_up_all(&ctx->cq_wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001610 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1611 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001612 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001613 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001614 if (waitqueue_active(&ctx->poll_wait)) {
1615 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001616 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1617 }
Jens Axboe8c838782019-03-12 15:48:16 -06001618}
1619
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001620static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1621{
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001622 /* see waitqueue_active() comment */
1623 smp_mb();
1624
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001625 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001626 if (waitqueue_active(&ctx->cq_wait))
Jens Axboe5fd46172021-08-06 14:04:31 -06001627 wake_up_all(&ctx->cq_wait);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001628 }
1629 if (io_should_trigger_evfd(ctx))
1630 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001631 if (waitqueue_active(&ctx->poll_wait)) {
1632 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001633 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1634 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001635}
1636
Jens Axboec4a2ed72019-11-21 21:01:26 -07001637/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001638static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001639{
Jens Axboeb18032b2021-01-24 16:58:56 -07001640 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001641
Pavel Begunkova566c552021-05-16 22:58:08 +01001642 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001643 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001644
Jens Axboeb18032b2021-01-24 16:58:56 -07001645 posted = false;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001646 spin_lock(&ctx->completion_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001647 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001648 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001649 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001650
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001651 if (!cqe && !force)
1652 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001653 ocqe = list_first_entry(&ctx->cq_overflow_list,
1654 struct io_overflow_cqe, list);
1655 if (cqe)
1656 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1657 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001658 io_account_cq_overflow(ctx);
1659
Jens Axboeb18032b2021-01-24 16:58:56 -07001660 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001661 list_del(&ocqe->list);
1662 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001663 }
1664
Pavel Begunkov09e88402020-12-17 00:24:38 +00001665 all_flushed = list_empty(&ctx->cq_overflow_list);
1666 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001667 clear_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001668 WRITE_ONCE(ctx->rings->sq_flags,
1669 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001670 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001671
Jens Axboeb18032b2021-01-24 16:58:56 -07001672 if (posted)
1673 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001674 spin_unlock(&ctx->completion_lock);
Jens Axboeb18032b2021-01-24 16:58:56 -07001675 if (posted)
1676 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001677 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001678}
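/*
 * Editor's note (illustrative summary, not upstream text): overflowed CQEs
 * sit on ctx->cq_overflow_list as kmalloc'ed io_overflow_cqe entries until
 * the flush above can copy them into free CQ ring slots (or drop them, with
 * accounting, when forced).  Once the backlog is empty, IORING_SQ_CQ_OVERFLOW
 * is cleared from sq_flags so userspace can see the ring is healthy again.
 */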
1679
Pavel Begunkov90f67362021-08-09 20:18:12 +01001680static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001681{
Jens Axboeca0a2652021-03-04 17:15:48 -07001682 bool ret = true;
1683
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001684 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001685 /* iopoll syncs against uring_lock, not completion_lock */
1686 if (ctx->flags & IORING_SETUP_IOPOLL)
1687 mutex_lock(&ctx->uring_lock);
Pavel Begunkov90f67362021-08-09 20:18:12 +01001688 ret = __io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001689 if (ctx->flags & IORING_SETUP_IOPOLL)
1690 mutex_unlock(&ctx->uring_lock);
1691 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001692
1693 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001694}
1695
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001696/* must be called reasonably soon after putting a request */
1697static inline void io_put_task(struct task_struct *task, int nr)
1698{
1699 struct io_uring_task *tctx = task->io_uring;
1700
Pavel Begunkove98e49b2021-08-18 17:01:43 +01001701 if (likely(task == current)) {
1702 tctx->cached_refs += nr;
1703 } else {
1704 percpu_counter_sub(&tctx->inflight, nr);
1705 if (unlikely(atomic_read(&tctx->in_idle)))
1706 wake_up(&tctx->wait);
1707 put_task_struct_many(task, nr);
1708 }
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001709}
1710
Pavel Begunkov9a108672021-08-27 11:55:01 +01001711static void io_task_refs_refill(struct io_uring_task *tctx)
1712{
1713 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
1714
1715 percpu_counter_add(&tctx->inflight, refill);
1716 refcount_add(refill, &current->usage);
1717 tctx->cached_refs += refill;
1718}
1719
1720static inline void io_get_task_refs(int nr)
1721{
1722 struct io_uring_task *tctx = current->io_uring;
1723
1724 tctx->cached_refs -= nr;
1725 if (unlikely(tctx->cached_refs < 0))
1726 io_task_refs_refill(tctx);
1727}
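/*
 * Editor's worked example (illustrative only): the refill above tops the
 * cache back up to exactly IO_TCTX_REFS_CACHE_NR.  If a submission batch
 * drove tctx->cached_refs down to -3, then refill == 3 + IO_TCTX_REFS_CACHE_NR;
 * after charging that many references to the inflight counter and the task's
 * usage count, cached_refs lands back on IO_TCTX_REFS_CACHE_NR.
 */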
1728
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001729static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1730 long res, unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001731{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001732 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001733
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001734 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1735 if (!ocqe) {
1736 /*
1737 * If we're in ring overflow flush mode, or in task cancel mode,
1738 * or cannot allocate an overflow entry, then we need to drop it
1739 * on the floor.
1740 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001741 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001742 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001743 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001744 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001745 set_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001746 WRITE_ONCE(ctx->rings->sq_flags,
1747 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1748
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001749 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001750 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001751 ocqe->cqe.res = res;
1752 ocqe->cqe.flags = cflags;
1753 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1754 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001755}
1756
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001757static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1758 long res, unsigned int cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001759{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001760 struct io_uring_cqe *cqe;
1761
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001762 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001763
1764 /*
1765 * If we can't get a cq entry, userspace overflowed the
1766 * submission (by quite a lot). Increment the overflow count in
1767 * the ring.
1768 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001769 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001770 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001771 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001772 WRITE_ONCE(cqe->res, res);
1773 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001774 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001775 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001776 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001777}
1778
Pavel Begunkov8d133262021-04-11 01:46:33 +01001779/* not hot enough to be worth bloating callers by inlining */
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001780static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1781 long res, unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001782{
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001783 return __io_cqring_fill_event(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001784}
1785
Pavel Begunkov7a612352021-03-09 00:37:59 +00001786static void io_req_complete_post(struct io_kiocb *req, long res,
1787 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001788{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001789 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001790
Jens Axboe79ebeae2021-08-10 15:18:27 -06001791 spin_lock(&ctx->completion_lock);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001792 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001793 /*
1794 * If we're the last reference to this request, add to our locked
1795 * free_list cache.
1796 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001797 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001798 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov0756a862021-08-15 10:40:25 +01001799 if (req->flags & IO_DISARM_MASK)
Pavel Begunkov7a612352021-03-09 00:37:59 +00001800 io_disarm_next(req);
1801 if (req->link) {
1802 io_req_task_queue(req->link);
1803 req->link = NULL;
1804 }
1805 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001806 io_dismantle_req(req);
1807 io_put_task(req->task, 1);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001808 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001809 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001810 } else {
1811 if (!percpu_ref_tryget(&ctx->refs))
1812 req = NULL;
1813 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001814 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001815 spin_unlock(&ctx->completion_lock);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001816
Pavel Begunkov180f8292021-03-14 20:57:09 +00001817 if (req) {
1818 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001819 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001820 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001821}
1822
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001823static inline bool io_req_needs_clean(struct io_kiocb *req)
1824{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001825 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001826}
1827
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001828static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001829 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001830{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001831 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001832 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001833 req->result = res;
1834 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001835 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001836}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001837
Pavel Begunkov889fca72021-02-10 00:03:09 +00001838static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1839 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001840{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001841 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1842 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001843 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001844 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001845}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001846
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001847static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001848{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001849 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001850}
1851
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001852static void io_req_complete_failed(struct io_kiocb *req, long res)
1853{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001854 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001855 io_req_complete_post(req, res, 0);
1856}
1857
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01001858static void io_req_complete_fail_submit(struct io_kiocb *req)
1859{
1860 /*
1861	 * We don't submit; fail them all. To do that, replace hardlinks with
1862	 * normal links. An extra REQ_F_LINK is tolerated.
1863 */
1864 req->flags &= ~REQ_F_HARDLINK;
1865 req->flags |= REQ_F_LINK;
1866 io_req_complete_failed(req, req->result);
1867}
1868
Pavel Begunkov864ea922021-08-09 13:04:08 +01001869/*
1870 * Don't initialise the fields below on every allocation, but do that in
1871 * advance and keep them valid across allocations.
1872 */
1873static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1874{
1875 req->ctx = ctx;
1876 req->link = NULL;
1877 req->async_data = NULL;
1878 /* not necessary, but safer to zero */
1879 req->result = 0;
1880}
1881
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001882static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001883 struct io_submit_state *state)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001884{
Jens Axboe79ebeae2021-08-10 15:18:27 -06001885 spin_lock(&ctx->completion_lock);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001886 list_splice_init(&ctx->locked_free_list, &state->free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001887 ctx->locked_free_nr = 0;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001888 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001889}
1890
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001891/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001892static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001893{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001894 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001895 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001896
Jens Axboec7dae4b2021-02-09 19:53:37 -07001897 /*
1898 * If we have more than a batch's worth of requests in our IRQ side
1899 * locked cache, grab the lock and move them over to our submission
1900 * side cache.
1901 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001902 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001903 io_flush_cached_locked_reqs(ctx, state);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001904
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001905 nr = state->free_reqs;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001906 while (!list_empty(&state->free_list)) {
1907 struct io_kiocb *req = list_first_entry(&state->free_list,
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001908 struct io_kiocb, inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001909
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001910 list_del(&req->inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001911 state->reqs[nr++] = req;
1912 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001913 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001914 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001915
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001916 state->free_reqs = nr;
1917 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001918}
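/*
 * Editor's note (illustrative summary, not upstream text): free requests are
 * cached in two tiers.  Completions that run under ->completion_lock park
 * requests on ctx->locked_free_list; once that list grows past a batch, the
 * flush above splices it into the submission-side state->free_list, from
 * which io_alloc_req() refills state->reqs[] without touching the slab
 * allocator.
 */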
1919
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001920/*
1921 * A request might get retired back into the request caches even before opcode
1922 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1923 * Because of that, io_alloc_req() should be called only under ->uring_lock
1924 * and with extra caution to not get a request that is still being worked on.
1925 */
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001926static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001927 __must_hold(&ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001928{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001929 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkov864ea922021-08-09 13:04:08 +01001930 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1931 int ret, i;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001932
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001933 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001934
Pavel Begunkov864ea922021-08-09 13:04:08 +01001935 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1936 goto got_req;
Jens Axboe2579f912019-01-09 09:10:43 -07001937
Pavel Begunkov864ea922021-08-09 13:04:08 +01001938 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1939 state->reqs);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001940
Pavel Begunkov864ea922021-08-09 13:04:08 +01001941 /*
1942 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1943 * retry single alloc to be on the safe side.
1944 */
1945 if (unlikely(ret <= 0)) {
1946 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1947 if (!state->reqs[0])
1948 return NULL;
1949 ret = 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001950 }
Pavel Begunkov864ea922021-08-09 13:04:08 +01001951
1952 for (i = 0; i < ret; i++)
1953 io_preinit_req(state->reqs[i], ctx);
1954 state->free_reqs = ret;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001955got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001956 state->free_reqs--;
1957 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001958}
1959
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001960static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001961{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001962 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001963 fput(file);
1964}
1965
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001966static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001967{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001968 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001969
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01001970 if (io_req_needs_clean(req))
1971 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001972 if (!(flags & REQ_F_FIXED_FILE))
1973 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001974 if (req->fixed_rsrc_refs)
1975 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001976 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00001977 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001978 req->async_data = NULL;
1979 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001980}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001981
Pavel Begunkov216578e2020-10-13 09:44:00 +01001982static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001983{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001984 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001985
Pavel Begunkov216578e2020-10-13 09:44:00 +01001986 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001987 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001988
Jens Axboe79ebeae2021-08-10 15:18:27 -06001989 spin_lock(&ctx->completion_lock);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001990 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01001991 ctx->locked_free_nr++;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001992 spin_unlock(&ctx->completion_lock);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01001993
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001994 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001995}
1996
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001997static inline void io_remove_next_linked(struct io_kiocb *req)
1998{
1999 struct io_kiocb *nxt = req->link;
2000
2001 req->link = nxt->link;
2002 nxt->link = NULL;
2003}
2004
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002005static bool io_kill_linked_timeout(struct io_kiocb *req)
2006 __must_hold(&req->ctx->completion_lock)
Jens Axboe89b263f2021-08-10 15:14:18 -06002007 __must_hold(&req->ctx->timeout_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002008{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002009 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002010
Pavel Begunkovb97e7362021-08-15 10:40:23 +01002011 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002012 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002013
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002014 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00002015 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01002016 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovef9dd632021-08-28 19:54:38 -06002017 list_del(&link->timeout.list);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002018 io_cqring_fill_event(link->ctx, link->user_data,
2019 -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002020 io_put_req_deferred(link);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002021 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002022 }
2023 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002024 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002025}
2026
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002027static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002028 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002029{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002030 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06002031
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002032 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002033 while (link) {
Hao Xua8295b92021-08-27 17:46:09 +08002034 long res = -ECANCELED;
2035
2036 if (link->flags & REQ_F_FAIL)
2037 res = link->result;
2038
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002039 nxt = link->link;
2040 link->link = NULL;
2041
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002042 trace_io_uring_fail_link(req, link);
Hao Xua8295b92021-08-27 17:46:09 +08002043 io_cqring_fill_event(link->ctx, link->user_data, res, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002044 io_put_req_deferred(link);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002045 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06002046 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002047}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002048
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002049static bool io_disarm_next(struct io_kiocb *req)
2050 __must_hold(&req->ctx->completion_lock)
2051{
2052 bool posted = false;
2053
Pavel Begunkov0756a862021-08-15 10:40:25 +01002054 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2055 struct io_kiocb *link = req->link;
2056
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01002057 req->flags &= ~REQ_F_ARM_LTIMEOUT;
Pavel Begunkov0756a862021-08-15 10:40:25 +01002058 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2059 io_remove_next_linked(req);
2060 io_cqring_fill_event(link->ctx, link->user_data,
2061 -ECANCELED, 0);
2062 io_put_req_deferred(link);
2063 posted = true;
2064 }
2065 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
Jens Axboe89b263f2021-08-10 15:14:18 -06002066 struct io_ring_ctx *ctx = req->ctx;
2067
2068 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002069 posted = io_kill_linked_timeout(req);
Jens Axboe89b263f2021-08-10 15:14:18 -06002070 spin_unlock_irq(&ctx->timeout_lock);
2071 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002072 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01002073 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002074 posted |= (req->link != NULL);
2075 io_fail_links(req);
2076 }
2077 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06002078}
2079
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002080static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002081{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002082 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07002083
Jens Axboe9e645e112019-05-10 16:07:28 -06002084 /*
2085 * If LINK is set, we have dependent requests in this chain. If we
2086 * didn't fail this request, queue the first one up, moving any other
2087 * dependencies to the next request. In case of failure, fail the rest
2088 * of the chain.
2089 */
Pavel Begunkov0756a862021-08-15 10:40:25 +01002090 if (req->flags & IO_DISARM_MASK) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002091 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002092 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002093
Jens Axboe79ebeae2021-08-10 15:18:27 -06002094 spin_lock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002095 posted = io_disarm_next(req);
2096 if (posted)
2097 io_commit_cqring(req->ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002098 spin_unlock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002099 if (posted)
2100 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002101 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002102 nxt = req->link;
2103 req->link = NULL;
2104 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002105}
Jens Axboe2665abf2019-11-05 12:40:47 -07002106
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002107static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002108{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00002109 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002110 return NULL;
2111 return __io_req_find_next(req);
2112}
2113
Pavel Begunkovf237c302021-08-18 12:42:46 +01002114static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
Pavel Begunkov2c323952021-02-28 22:04:53 +00002115{
2116 if (!ctx)
2117 return;
Pavel Begunkovf237c302021-08-18 12:42:46 +01002118 if (*locked) {
Hao Xu99c8bc52021-08-21 06:19:54 +08002119 if (ctx->submit_state.compl_nr)
2120 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00002121 mutex_unlock(&ctx->uring_lock);
Pavel Begunkovf237c302021-08-18 12:42:46 +01002122 *locked = false;
Pavel Begunkov2c323952021-02-28 22:04:53 +00002123 }
2124 percpu_ref_put(&ctx->refs);
2125}
2126
Jens Axboe7cbf1722021-02-10 00:03:20 +00002127static void tctx_task_work(struct callback_head *cb)
2128{
Pavel Begunkovf237c302021-08-18 12:42:46 +01002129 bool locked = false;
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002130 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002131 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2132 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002133
Pavel Begunkov16f72072021-06-17 18:14:09 +01002134 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002135 struct io_wq_work_node *node;
2136
Pavel Begunkov8d4ad412021-09-02 00:38:23 +01002137 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
2138 io_submit_flush_completions(ctx);
2139
Pavel Begunkov3f184072021-06-17 18:14:06 +01002140 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01002141 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002142 INIT_WQ_LIST(&tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002143 if (!node)
2144 tctx->task_running = false;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002145 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002146 if (!node)
2147 break;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002148
Pavel Begunkov6294f362021-08-10 17:53:55 +01002149 do {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002150 struct io_wq_work_node *next = node->next;
2151 struct io_kiocb *req = container_of(node, struct io_kiocb,
2152 io_task_work.node);
2153
2154 if (req->ctx != ctx) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002155 ctx_flush_and_put(ctx, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002156 ctx = req->ctx;
Pavel Begunkov126180b2021-08-18 12:42:47 +01002157 /* if not contended, grab and improve batching */
2158 locked = mutex_trylock(&ctx->uring_lock);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002159 percpu_ref_get(&ctx->refs);
2160 }
Pavel Begunkovf237c302021-08-18 12:42:46 +01002161 req->io_task_work.func(req, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002162 node = next;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002163 } while (node);
2164
Jens Axboe7cbf1722021-02-10 00:03:20 +00002165 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01002166 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002167
Pavel Begunkovf237c302021-08-18 12:42:46 +01002168 ctx_flush_and_put(ctx, &locked);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002169}
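/*
 * Editor's note (illustrative summary, not upstream text): the loop above
 * batches task_work by ring.  ->uring_lock is grabbed opportunistically
 * (mutex_trylock) whenever the ctx changes and is then held across
 * consecutive requests targeting the same ring, so their completions can be
 * flushed in one go by ctx_flush_and_put() rather than posted one at a time.
 */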
2170
Pavel Begunkove09ee512021-07-01 13:26:05 +01002171static void io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00002172{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002173 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002174 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002175 enum task_work_notify_mode notify;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002176 struct io_wq_work_node *node;
Jens Axboe0b81e802021-02-16 10:33:53 -07002177 unsigned long flags;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002178 bool running;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002179
2180 WARN_ON_ONCE(!tctx);
2181
Jens Axboe0b81e802021-02-16 10:33:53 -07002182 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002183 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002184 running = tctx->task_running;
2185 if (!running)
2186 tctx->task_running = true;
Jens Axboe0b81e802021-02-16 10:33:53 -07002187 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002188
2189 /* task_work already pending, we're done */
Pavel Begunkov6294f362021-08-10 17:53:55 +01002190 if (running)
Pavel Begunkove09ee512021-07-01 13:26:05 +01002191 return;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002192
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002193 /*
2194 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2195 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2196 * processing task_work. There's no reliable way to tell if TWA_RESUME
2197 * will do the job.
2198 */
2199 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002200 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2201 wake_up_process(tsk);
Pavel Begunkove09ee512021-07-01 13:26:05 +01002202 return;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002203 }
Pavel Begunkov2215bed2021-08-09 13:04:06 +01002204
Pavel Begunkove09ee512021-07-01 13:26:05 +01002205 spin_lock_irqsave(&tctx->task_lock, flags);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002206 tctx->task_running = false;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002207 node = tctx->task_list.first;
2208 INIT_WQ_LIST(&tctx->task_list);
2209 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002210
Pavel Begunkove09ee512021-07-01 13:26:05 +01002211 while (node) {
2212 req = container_of(node, struct io_kiocb, io_task_work.node);
2213 node = node->next;
2214 if (llist_add(&req->io_task_work.fallback_node,
2215 &req->ctx->fallback_llist))
2216 schedule_delayed_work(&req->ctx->fallback_work, 1);
2217 }
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002218}
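/*
 * Editor's note (illustrative summary, not upstream text): if task_work_add()
 * fails above, the target task is exiting.  The just-queued entries are then
 * pulled back off tctx->task_list and punted to the ring's fallback_work (a
 * delayed work item), so each request's ->io_task_work.func still runs and
 * no request is leaked.
 */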
2219
Pavel Begunkovf237c302021-08-18 12:42:46 +01002220static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
Jens Axboec40f6372020-06-25 15:39:59 -06002221{
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002222 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002223
Pavel Begunkovb18a1a42021-08-25 20:51:39 +01002224 /* not needed for normal modes, but SQPOLL depends on it */
Pavel Begunkovf237c302021-08-18 12:42:46 +01002225 io_tw_lock(ctx, locked);
Pavel Begunkov25935532021-03-19 17:22:40 +00002226 io_req_complete_failed(req, req->result);
Jens Axboec40f6372020-06-25 15:39:59 -06002227}
2228
Pavel Begunkovf237c302021-08-18 12:42:46 +01002229static void io_req_task_submit(struct io_kiocb *req, bool *locked)
Jens Axboec40f6372020-06-25 15:39:59 -06002230{
2231 struct io_ring_ctx *ctx = req->ctx;
2232
Pavel Begunkovf237c302021-08-18 12:42:46 +01002233 io_tw_lock(ctx, locked);
Jens Axboe316319e2021-08-19 09:41:42 -06002234 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkovaf066f32021-08-09 13:04:19 +01002235 if (likely(!(req->task->flags & PF_EXITING)))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002236 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002237 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002238 io_req_complete_failed(req, -EFAULT);
Jens Axboe9e645e112019-05-10 16:07:28 -06002239}
2240
Pavel Begunkova3df76982021-02-18 22:32:52 +00002241static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2242{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002243 req->result = ret;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002244 req->io_task_work.func = io_req_task_cancel;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002245 io_req_task_work_add(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00002246}
2247
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002248static void io_req_task_queue(struct io_kiocb *req)
2249{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002250 req->io_task_work.func = io_req_task_submit;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002251 io_req_task_work_add(req);
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002252}
2253
Jens Axboe773af692021-07-27 10:25:55 -06002254static void io_req_task_queue_reissue(struct io_kiocb *req)
2255{
2256 req->io_task_work.func = io_queue_async_work;
2257 io_req_task_work_add(req);
2258}
2259
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002260static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002261{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002262 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002263
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002264 if (nxt)
2265 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002266}
2267
Jens Axboe9e645e112019-05-10 16:07:28 -06002268static void io_free_req(struct io_kiocb *req)
2269{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002270 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002271 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002272}
2273
Pavel Begunkovf237c302021-08-18 12:42:46 +01002274static void io_free_req_work(struct io_kiocb *req, bool *locked)
2275{
2276 io_free_req(req);
2277}
2278
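/*
 * Batches the reference drops done while recycling completed requests:
 * task references and ctx percpu refs are accumulated per batch and
 * released in one go by io_req_free_batch_finish() instead of once per
 * request.
 */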
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002279struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002280 struct task_struct *task;
2281 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002282 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002283};
2284
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002285static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002286{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002287 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002288 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002289 rb->task = NULL;
2290}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002291
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002292static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2293 struct req_batch *rb)
2294{
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002295 if (rb->ctx_refs)
2296 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkove98e49b2021-08-18 17:01:43 +01002297 if (rb->task)
Pavel Begunkove9dbe222021-08-09 13:04:20 +01002298 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002299}
2300
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002301static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2302 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002303{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002304 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002305 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002306
Jens Axboee3bc8e92020-09-24 08:45:57 -06002307 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002308 if (rb->task)
2309 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002310 rb->task = req->task;
2311 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002312 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002313 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002314 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002315
Pavel Begunkovbd759042021-02-12 03:23:50 +00002316 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002317 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002318 else
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002319 list_add(&req->inflight_entry, &state->free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002320}
2321
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002322static void io_submit_flush_completions(struct io_ring_ctx *ctx)
Jens Axboea141dd82021-08-12 12:48:34 -06002323 __must_hold(&ctx->uring_lock)
Pavel Begunkov905c1722021-02-10 00:03:14 +00002324{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002325 struct io_submit_state *state = &ctx->submit_state;
2326 int i, nr = state->compl_nr;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002327 struct req_batch rb;
2328
Jens Axboe79ebeae2021-08-10 15:18:27 -06002329 spin_lock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002330 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002331 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002332
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002333 __io_cqring_fill_event(ctx, req->user_data, req->result,
2334 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002335 }
2336 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002337 spin_unlock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002338 io_cqring_ev_posted(ctx);
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002339
2340 io_init_req_batch(&rb);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002341 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002342 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov905c1722021-02-10 00:03:14 +00002343
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002344 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002345 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002346 }
2347
2348 io_req_free_batch_finish(ctx, &rb);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002349 state->compl_nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002350}
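/*
 * The flush above runs in two phases: CQEs for all batched completions are
 * filled and committed under completion_lock first, then request references
 * are dropped and requests recycled outside the lock via a req_batch.
 */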
2351
Jens Axboeba816ad2019-09-28 11:36:45 -06002352/*
2353 * Drop reference to request, return next in chain (if there is one) if this
2354 * was the last reference to this request.
2355 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002356static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002357{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002358 struct io_kiocb *nxt = NULL;
2359
Jens Axboede9b4cc2021-02-24 13:28:27 -07002360 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002361 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002362 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002363 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002364 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002365}
2366
Pavel Begunkov0d850352021-03-19 17:22:37 +00002367static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002368{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002369 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002370 io_free_req(req);
2371}
2372
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002373static inline void io_put_req_deferred(struct io_kiocb *req)
Pavel Begunkov216578e2020-10-13 09:44:00 +01002374{
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002375 if (req_ref_put_and_test(req)) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002376 req->io_task_work.func = io_free_req_work;
Pavel Begunkov543af3a2021-08-09 13:04:15 +01002377 io_req_task_work_add(req);
2378 }
Pavel Begunkov216578e2020-10-13 09:44:00 +01002379}
2380
Pavel Begunkov6c503152021-01-04 20:36:36 +00002381static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002382{
2383 /* See comment at the top of this file */
2384 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002385 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002386}
2387
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002388static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2389{
2390 struct io_rings *rings = ctx->rings;
2391
2392 /* make sure SQ entry isn't read before tail */
2393 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2394}
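/*
 * Illustrative example (not from the source): with the application having
 * published sq.tail == 5 and the kernel having consumed cached_sq_head == 3,
 * io_sqring_entries() reports 2 SQEs left to consume; the unsigned
 * subtraction keeps this correct across index wraparound.
 */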
2395
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002396static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002397{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002398 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002399
Jens Axboebcda7ba2020-02-23 16:42:51 -07002400 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2401 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002402 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002403 kfree(kbuf);
2404 return cflags;
2405}
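/*
 * Illustrative sketch of the userspace side (not part of this file): a
 * consumer of a provided buffer can recover the buffer ID from the cflags
 * built above, roughly:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */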
2406
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002407static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2408{
2409 struct io_buffer *kbuf;
2410
Pavel Begunkovae421d92021-08-17 20:28:08 +01002411 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
2412 return 0;
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002413 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2414 return io_put_kbuf(req, kbuf);
2415}
2416
Jens Axboe4c6e2772020-07-01 11:29:10 -06002417static inline bool io_run_task_work(void)
2418{
Nadav Amitef98eb02021-08-07 17:13:41 -07002419 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
Jens Axboe4c6e2772020-07-01 11:29:10 -06002420 __set_current_state(TASK_RUNNING);
Nadav Amitef98eb02021-08-07 17:13:41 -07002421 tracehook_notify_signal();
Jens Axboe4c6e2772020-07-01 11:29:10 -06002422 return true;
2423 }
2424
2425 return false;
2426}
2427
Jens Axboedef596e2019-01-09 08:59:42 -07002428/*
2429 * Find and free completed poll iocbs
2430 */
2431static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002432 struct list_head *done)
Jens Axboedef596e2019-01-09 08:59:42 -07002433{
Jens Axboe8237e042019-12-28 10:48:22 -07002434 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002435 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002436
2437 /* order with ->result store in io_complete_rw_iopoll() */
2438 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002439
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002440 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002441 while (!list_empty(done)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002442 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002443 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002444
Pavel Begunkovae421d92021-08-17 20:28:08 +01002445 __io_cqring_fill_event(ctx, req->user_data, req->result,
2446 io_put_rw_kbuf(req));
Jens Axboedef596e2019-01-09 08:59:42 -07002447 (*nr_events)++;
2448
Jens Axboede9b4cc2021-02-24 13:28:27 -07002449 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002450 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002451 }
Jens Axboedef596e2019-01-09 08:59:42 -07002452
Jens Axboe09bb8392019-03-13 12:39:28 -06002453 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002454 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002455 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002456}
2457
Jens Axboedef596e2019-01-09 08:59:42 -07002458static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002459 long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002460{
2461 struct io_kiocb *req, *tmp;
2462 LIST_HEAD(done);
2463 bool spin;
Jens Axboedef596e2019-01-09 08:59:42 -07002464
2465 /*
2466 * Only spin for completions if we don't have multiple devices hanging
2467 * off our complete list, and we're under the requested amount.
2468 */
Hao Xu915b3dd2021-06-28 05:37:30 +08002469 spin = !ctx->poll_multi_queue && *nr_events < min;
Jens Axboedef596e2019-01-09 08:59:42 -07002470
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002471 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002472 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkova2416e12021-08-09 13:04:09 +01002473 int ret;
Jens Axboedef596e2019-01-09 08:59:42 -07002474
2475 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002476 * Move completed and retryable entries to our local lists.
2477 * If we find a request that requires polling, break out
2478 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002479 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002480 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002481 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002482 continue;
2483 }
2484 if (!list_empty(&done))
2485 break;
2486
2487 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
Pavel Begunkova2416e12021-08-09 13:04:09 +01002488 if (unlikely(ret < 0))
2489 return ret;
2490 else if (ret)
2491 spin = false;
Jens Axboedef596e2019-01-09 08:59:42 -07002492
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002493 /* iopoll may have completed current req */
2494 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002495 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002496 }
2497
2498 if (!list_empty(&done))
Pavel Begunkova8576af2021-08-15 10:40:21 +01002499 io_iopoll_complete(ctx, nr_events, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002500
Pavel Begunkova2416e12021-08-09 13:04:09 +01002501 return 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002502}
2503
2504/*
Jens Axboedef596e2019-01-09 08:59:42 -07002505 * We can't just wait for polled events to come to us; we have to actively
2506 * find and complete them.
2507 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002508static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002509{
2510 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2511 return;
2512
2513 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002514 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002515 unsigned int nr_events = 0;
2516
Pavel Begunkova8576af2021-08-15 10:40:21 +01002517 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002518
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002519	/* let it sleep and repeat later if we can't complete a request */
2520 if (nr_events == 0)
2521 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002522 /*
2523	 * Ensure we allow local-to-the-cpu processing to take place;
2524	 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002525	 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002526 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002527 if (need_resched()) {
2528 mutex_unlock(&ctx->uring_lock);
2529 cond_resched();
2530 mutex_lock(&ctx->uring_lock);
2531 }
Jens Axboedef596e2019-01-09 08:59:42 -07002532 }
2533 mutex_unlock(&ctx->uring_lock);
2534}
2535
Pavel Begunkov7668b922020-07-07 16:36:21 +03002536static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002537{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002538 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002539 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002540
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002541 /*
2542 * We disallow the app entering submit/complete with polling, but we
2543 * still need to lock the ring to prevent racing with polled issue
2544 * that got punted to a workqueue.
2545 */
2546 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002547 /*
2548 * Don't enter poll loop if we already have events pending.
2549 * If we do, we can potentially be spinning for commands that
2550 * already triggered a CQE (eg in error).
2551 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01002552 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002553 __io_cqring_overflow_flush(ctx, false);
2554 if (io_cqring_events(ctx))
2555 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002556 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002557 /*
2558 * If a submit got punted to a workqueue, we can have the
2559 * application entering polling for a command before it gets
2560 * issued. That app will hold the uring_lock for the duration
2561 * of the poll right here, so we need to take a breather every
2562 * now and then to ensure that the issue has a chance to add
2563 * the poll to the issued list. Otherwise we can spin here
2564 * forever, while the workqueue is stuck trying to acquire the
2565 * very same mutex.
2566 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002567 if (list_empty(&ctx->iopoll_list)) {
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002568 u32 tail = ctx->cached_cq_tail;
2569
Jens Axboe500f9fb2019-08-19 12:15:59 -06002570 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002571 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002572 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002573
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002574 /* some requests don't go through iopoll_list */
2575 if (tail != ctx->cached_cq_tail ||
2576 list_empty(&ctx->iopoll_list))
Pavel Begunkove9979b32021-04-13 02:58:45 +01002577 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002578 }
Pavel Begunkova8576af2021-08-15 10:40:21 +01002579 ret = io_do_iopoll(ctx, &nr_events, min);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002580 } while (!ret && nr_events < min && !need_resched());
2581out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002582 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002583 return ret;
2584}
2585
Jens Axboe491381ce2019-10-17 09:20:46 -06002586static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002587{
Jens Axboe491381ce2019-10-17 09:20:46 -06002588 /*
2589 * Tell lockdep we inherited freeze protection from submission
2590 * thread.
2591 */
2592 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002593 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002594
Pavel Begunkov1c986792021-03-22 01:58:31 +00002595 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2596 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002597 }
2598}
2599
Jens Axboeb63534c2020-06-04 11:28:00 -06002600#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002601static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002602{
Pavel Begunkovab454432021-03-22 01:58:33 +00002603 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002604
Pavel Begunkovab454432021-03-22 01:58:33 +00002605 if (!rw)
2606 return !io_req_prep_async(req);
Jens Axboecd658692021-09-10 11:19:14 -06002607 iov_iter_restore(&rw->iter, &rw->iter_state);
Pavel Begunkovab454432021-03-22 01:58:33 +00002608 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002609}
Jens Axboeb63534c2020-06-04 11:28:00 -06002610
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002611static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002612{
Jens Axboe355afae2020-09-02 09:30:31 -06002613 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002614 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002615
Jens Axboe355afae2020-09-02 09:30:31 -06002616 if (!S_ISBLK(mode) && !S_ISREG(mode))
2617 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002618 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2619 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002620 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002621 /*
2622 * If ref is dying, we might be running poll reap from the exit work.
2623 * Don't attempt to reissue from that path, just let it fail with
2624 * -EAGAIN.
2625 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002626 if (percpu_ref_is_dying(&ctx->refs))
2627 return false;
Jens Axboeef046882021-07-27 10:50:31 -06002628 /*
2629 * Play it safe and assume not safe to re-import and reissue if we're
2630	 * not in the original thread group (or not in task context).
2631 */
2632 if (!same_thread_group(req->task, current) || !in_task())
2633 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002634 return true;
2635}
Jens Axboee82ad482021-04-02 19:45:34 -06002636#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002637static bool io_resubmit_prep(struct io_kiocb *req)
2638{
2639 return false;
2640}
Jens Axboee82ad482021-04-02 19:45:34 -06002641static bool io_rw_should_reissue(struct io_kiocb *req)
2642{
2643 return false;
2644}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002645#endif
2646
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002647static bool __io_complete_rw_common(struct io_kiocb *req, long res)
Jens Axboea1d7c392020-06-22 11:09:46 -06002648{
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002649 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2650 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002651 if (res != req->result) {
2652 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2653 io_rw_should_reissue(req)) {
2654 req->flags |= REQ_F_REISSUE;
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002655 return true;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002656 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002657 req_set_fail(req);
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002658 req->result = res;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002659 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002660 return false;
2661}
2662
Pavel Begunkovf237c302021-08-18 12:42:46 +01002663static void io_req_task_complete(struct io_kiocb *req, bool *locked)
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002664{
Pavel Begunkov126180b2021-08-18 12:42:47 +01002665 unsigned int cflags = io_put_rw_kbuf(req);
2666 long res = req->result;
2667
2668 if (*locked) {
2669 struct io_ring_ctx *ctx = req->ctx;
2670 struct io_submit_state *state = &ctx->submit_state;
2671
2672 io_req_complete_state(req, res, cflags);
2673 state->compl_reqs[state->compl_nr++] = req;
2674 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
2675 io_submit_flush_completions(ctx);
2676 } else {
2677 io_req_complete_post(req, res, cflags);
2678 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002679}
2680
2681static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2682 unsigned int issue_flags)
2683{
2684 if (__io_complete_rw_common(req, res))
2685 return;
Pavel Begunkov63637852021-09-02 00:38:22 +01002686 __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
Jens Axboeba816ad2019-09-28 11:36:45 -06002687}
2688
2689static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2690{
Jens Axboe9adbd452019-12-20 08:45:55 -07002691 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002692
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002693 if (__io_complete_rw_common(req, res))
2694 return;
2695 req->result = res;
2696 req->io_task_work.func = io_req_task_complete;
2697 io_req_task_work_add(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002698}
2699
Jens Axboedef596e2019-01-09 08:59:42 -07002700static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2701{
Jens Axboe9adbd452019-12-20 08:45:55 -07002702 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002703
Jens Axboe491381ce2019-10-17 09:20:46 -06002704 if (kiocb->ki_flags & IOCB_WRITE)
2705 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002706 if (unlikely(res != req->result)) {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002707 if (res == -EAGAIN && io_rw_should_reissue(req)) {
2708 req->flags |= REQ_F_REISSUE;
2709 return;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002710 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002711 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002712
2713 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002714 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002715 smp_wmb();
2716 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002717}
2718
2719/*
2720 * After the iocb has been issued, it's safe to be found on the poll list.
2721 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002722 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002723 * accessing the kiocb cookie.
2724 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002725static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002726{
2727 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002728 const bool in_async = io_wq_current_is_worker();
2729
2730 /* workqueue context doesn't hold uring_lock, grab it now */
2731 if (unlikely(in_async))
2732 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002733
2734 /*
2735 * Track whether we have multiple files in our lists. This will impact
2736	 * how we do polling eventually: we don't spin if we're on potentially
2737	 * different devices.
2738 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002739 if (list_empty(&ctx->iopoll_list)) {
Hao Xu915b3dd2021-06-28 05:37:30 +08002740 ctx->poll_multi_queue = false;
2741 } else if (!ctx->poll_multi_queue) {
Jens Axboedef596e2019-01-09 08:59:42 -07002742 struct io_kiocb *list_req;
Hao Xu915b3dd2021-06-28 05:37:30 +08002743 unsigned int queue_num0, queue_num1;
Jens Axboedef596e2019-01-09 08:59:42 -07002744
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002745 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002746 inflight_entry);
Hao Xu915b3dd2021-06-28 05:37:30 +08002747
2748 if (list_req->file != req->file) {
2749 ctx->poll_multi_queue = true;
2750 } else {
2751 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2752 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2753 if (queue_num0 != queue_num1)
2754 ctx->poll_multi_queue = true;
2755 }
Jens Axboedef596e2019-01-09 08:59:42 -07002756 }
2757
2758 /*
2759 * For fast devices, IO may have already completed. If it has, add
2760 * it to the front so we find it first.
2761 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002762 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002763 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002764 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002765 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002766
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002767 if (unlikely(in_async)) {
2768 /*
2769	 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2770	 * in sq thread task context or in io worker task context. If
2771	 * current task context is sq thread, we don't need to check
2772	 * whether we should wake up the sq thread.
2773 */
2774 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2775 wq_has_sleeper(&ctx->sq_data->wait))
2776 wake_up(&ctx->sq_data->wait);
2777
2778 mutex_unlock(&ctx->uring_lock);
2779 }
Jens Axboedef596e2019-01-09 08:59:42 -07002780}
2781
Jens Axboe4503b762020-06-01 10:00:27 -06002782static bool io_bdev_nowait(struct block_device *bdev)
2783{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002784 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002785}
2786
Jens Axboe2b188cc2019-01-07 10:46:33 -07002787/*
2788 * If we tracked the file through the SCM inflight mechanism, we could support
2789 * any file. For now, just ensure that anything potentially problematic is done
2790 * inline.
2791 */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002792static bool __io_file_supports_nowait(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002793{
2794 umode_t mode = file_inode(file)->i_mode;
2795
Jens Axboe4503b762020-06-01 10:00:27 -06002796 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002797 if (IS_ENABLED(CONFIG_BLOCK) &&
2798 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002799 return true;
2800 return false;
2801 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002802 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002803 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002804 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002805 if (IS_ENABLED(CONFIG_BLOCK) &&
2806 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002807 file->f_op != &io_uring_fops)
2808 return true;
2809 return false;
2810 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002811
Jens Axboec5b85622020-06-09 19:23:05 -06002812 /* any ->read/write should understand O_NONBLOCK */
2813 if (file->f_flags & O_NONBLOCK)
2814 return true;
2815
Jens Axboeaf197f52020-04-28 13:15:06 -06002816 if (!(file->f_mode & FMODE_NOWAIT))
2817 return false;
2818
2819 if (rw == READ)
2820 return file->f_op->read_iter != NULL;
2821
2822 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002823}
2824
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002825static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
Jens Axboe7b29f922021-03-12 08:30:14 -07002826{
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002827 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
Jens Axboe7b29f922021-03-12 08:30:14 -07002828 return true;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002829 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
Jens Axboe7b29f922021-03-12 08:30:14 -07002830 return true;
2831
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002832 return __io_file_supports_nowait(req->file, rw);
Jens Axboe7b29f922021-03-12 08:30:14 -07002833}
2834
Jens Axboe5d329e12021-09-14 11:08:37 -06002835static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2836 int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002837{
Jens Axboedef596e2019-01-09 08:59:42 -07002838 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002839 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002840 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002841 unsigned ioprio;
2842 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002843
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01002844 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002845 req->flags |= REQ_F_ISREG;
2846
Jens Axboe2b188cc2019-01-07 10:46:33 -07002847 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002848 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002849 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002850 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002851 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002852 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002853 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2854 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2855 if (unlikely(ret))
2856 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002857
Jens Axboe5d329e12021-09-14 11:08:37 -06002858 /*
2859 * If the file is marked O_NONBLOCK, still allow retry for it if it
2860 * supports async. Otherwise it's impossible to use O_NONBLOCK files
2861	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
2862 */
2863 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
2864 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002865 req->flags |= REQ_F_NOWAIT;
2866
Jens Axboe2b188cc2019-01-07 10:46:33 -07002867 ioprio = READ_ONCE(sqe->ioprio);
2868 if (ioprio) {
2869 ret = ioprio_check_cap(ioprio);
2870 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002871 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002872
2873 kiocb->ki_ioprio = ioprio;
2874 } else
2875 kiocb->ki_ioprio = get_current_ioprio();
2876
Jens Axboedef596e2019-01-09 08:59:42 -07002877 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002878 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2879 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002880 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002881
Jens Axboe394918e2021-03-08 11:40:23 -07002882 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
Jens Axboedef596e2019-01-09 08:59:42 -07002883 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002884 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002885 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002886 if (kiocb->ki_flags & IOCB_HIPRI)
2887 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002888 kiocb->ki_complete = io_complete_rw;
2889 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002890
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002891 if (req->opcode == IORING_OP_READ_FIXED ||
2892 req->opcode == IORING_OP_WRITE_FIXED) {
2893 req->imu = NULL;
2894 io_req_set_rsrc_node(req);
2895 }
2896
Jens Axboe3529d8c2019-12-19 18:24:38 -07002897 req->rw.addr = READ_ONCE(sqe->addr);
2898 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002899 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002900 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002901}
2902
2903static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2904{
2905 switch (ret) {
2906 case -EIOCBQUEUED:
2907 break;
2908 case -ERESTARTSYS:
2909 case -ERESTARTNOINTR:
2910 case -ERESTARTNOHAND:
2911 case -ERESTART_RESTARTBLOCK:
2912 /*
2913 * We can't just restart the syscall, since previously
2914 * submitted sqes may already be in progress. Just fail this
2915 * IO with EINTR.
2916 */
2917 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002918 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002919 default:
2920 kiocb->ki_complete(kiocb, ret, 0);
2921 }
2922}
2923
Jens Axboea1d7c392020-06-22 11:09:46 -06002924static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002925 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002926{
Jens Axboeba042912019-12-25 16:33:42 -07002927 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002928 struct io_async_rw *io = req->async_data;
Jens Axboeba042912019-12-25 16:33:42 -07002929
Jens Axboe227c0c92020-08-13 11:51:40 -06002930 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002931 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002932 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002933 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002934 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002935 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002936 }
2937
Jens Axboeba042912019-12-25 16:33:42 -07002938 if (req->flags & REQ_F_CUR_POS)
2939 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002940 if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
Pavel Begunkov889fca72021-02-10 00:03:09 +00002941 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002942 else
2943 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002944
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002945 if (req->flags & REQ_F_REISSUE) {
Pavel Begunkov97284632021-04-08 19:28:03 +01002946 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06002947 if (io_resubmit_prep(req)) {
Jens Axboe773af692021-07-27 10:25:55 -06002948 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00002949 } else {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002950 unsigned int cflags = io_put_rw_kbuf(req);
2951 struct io_ring_ctx *ctx = req->ctx;
2952
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002953 req_set_fail(req);
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002954 if (issue_flags & IO_URING_F_NONBLOCK) {
2955 mutex_lock(&ctx->uring_lock);
2956 __io_req_complete(req, issue_flags, ret, cflags);
2957 mutex_unlock(&ctx->uring_lock);
2958 } else {
2959 __io_req_complete(req, issue_flags, ret, cflags);
2960 }
Pavel Begunkov97284632021-04-08 19:28:03 +01002961 }
2962 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002963}
2964
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002965static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2966 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07002967{
Jens Axboe9adbd452019-12-20 08:45:55 -07002968 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01002969 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002970 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07002971
Pavel Begunkov75769e32021-04-01 15:43:54 +01002972 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07002973 return -EFAULT;
2974 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01002975 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07002976 return -EFAULT;
2977
2978 /*
2979	 * May not be the start of the buffer; set the size appropriately
2980	 * and advance us to the beginning.
2981 */
2982 offset = buf_addr - imu->ubuf;
2983 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002984
2985 if (offset) {
2986 /*
2987 * Don't use iov_iter_advance() here, as it's really slow for
2988 * using the latter parts of a big fixed buffer - it iterates
2989 * over each segment manually. We can cheat a bit here, because
2990 * we know that:
2991 *
2992 * 1) it's a BVEC iter, we set it up
2993 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2994 * first and last bvec
2995 *
2996 * So just find our index, and adjust the iterator afterwards.
2997	 * If the offset is within the first bvec (or the whole first
2998	 * bvec), just use iov_iter_advance(). This makes it easier
2999 * since we can just skip the first segment, which may not
3000 * be PAGE_SIZE aligned.
3001 */
3002 const struct bio_vec *bvec = imu->bvec;
3003
3004 if (offset <= bvec->bv_len) {
3005 iov_iter_advance(iter, offset);
3006 } else {
3007 unsigned long seg_skip;
3008
3009 /* skip first vec */
3010 offset -= bvec->bv_len;
3011 seg_skip = 1 + (offset >> PAGE_SHIFT);
3012
3013 iter->bvec = bvec + seg_skip;
3014 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02003015 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003016 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003017 }
3018 }
3019
Pavel Begunkov847595d2021-02-04 13:52:06 +00003020 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07003021}
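/*
 * Worked example of the offset math above (illustrative, assuming
 * PAGE_SIZE == 4096 and a full-page first bvec): with offset == 10000 the
 * first bvec is skipped (offset becomes 5904), seg_skip is
 * 1 + (5904 >> PAGE_SHIFT) == 2, so iteration resumes at the third bvec
 * with iov_offset == (5904 & ~PAGE_MASK) == 1808.
 */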
3022
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003023static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3024{
3025 struct io_ring_ctx *ctx = req->ctx;
3026 struct io_mapped_ubuf *imu = req->imu;
3027 u16 index, buf_index = req->buf_index;
3028
3029 if (likely(!imu)) {
3030 if (unlikely(buf_index >= ctx->nr_user_bufs))
3031 return -EFAULT;
3032 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
3033 imu = READ_ONCE(ctx->user_bufs[index]);
3034 req->imu = imu;
3035 }
3036 return __io_import_fixed(req, rw, iter, imu);
3037}
3038
Jens Axboebcda7ba2020-02-23 16:42:51 -07003039static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3040{
3041 if (needs_lock)
3042 mutex_unlock(&ctx->uring_lock);
3043}
3044
3045static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3046{
3047 /*
3048 * "Normal" inline submissions always hold the uring_lock, since we
3049 * grab it from the system call. Same is true for the SQPOLL offload.
3050 * The only exception is when we've detached the request and issue it
3051	 * from an async worker thread; grab the lock for that case.
3052 */
3053 if (needs_lock)
3054 mutex_lock(&ctx->uring_lock);
3055}
3056
3057static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3058 int bgid, struct io_buffer *kbuf,
3059 bool needs_lock)
3060{
3061 struct io_buffer *head;
3062
3063 if (req->flags & REQ_F_BUFFER_SELECTED)
3064 return kbuf;
3065
3066 io_ring_submit_lock(req->ctx, needs_lock);
3067
3068 lockdep_assert_held(&req->ctx->uring_lock);
3069
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003070 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07003071 if (head) {
3072 if (!list_empty(&head->list)) {
3073 kbuf = list_last_entry(&head->list, struct io_buffer,
3074 list);
3075 list_del(&kbuf->list);
3076 } else {
3077 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003078 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07003079 }
3080 if (*len > kbuf->len)
3081 *len = kbuf->len;
3082 } else {
3083 kbuf = ERR_PTR(-ENOBUFS);
3084 }
3085
3086 io_ring_submit_unlock(req->ctx, needs_lock);
3087
3088 return kbuf;
3089}
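/*
 * Provided buffers are grouped by buffer group id: the xarray lookup above
 * finds the group head for the given bgid, a buffer is popped from the tail
 * of that group's list (or the head itself is used when it is the last
 * one), and *len is clamped to the selected buffer's size. Buffers usually
 * enter these groups from the application, e.g. via IORING_OP_PROVIDE_BUFFERS.
 */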
3090
Jens Axboe4d954c22020-02-27 07:31:19 -07003091static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3092 bool needs_lock)
3093{
3094 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003095 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07003096
3097 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003098 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07003099 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3100 if (IS_ERR(kbuf))
3101 return kbuf;
3102 req->rw.addr = (u64) (unsigned long) kbuf;
3103 req->flags |= REQ_F_BUFFER_SELECTED;
3104 return u64_to_user_ptr(kbuf->addr);
3105}
3106
3107#ifdef CONFIG_COMPAT
3108static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3109 bool needs_lock)
3110{
3111 struct compat_iovec __user *uiov;
3112 compat_ssize_t clen;
3113 void __user *buf;
3114 ssize_t len;
3115
3116 uiov = u64_to_user_ptr(req->rw.addr);
3117 if (!access_ok(uiov, sizeof(*uiov)))
3118 return -EFAULT;
3119 if (__get_user(clen, &uiov->iov_len))
3120 return -EFAULT;
3121 if (clen < 0)
3122 return -EINVAL;
3123
3124 len = clen;
3125 buf = io_rw_buffer_select(req, &len, needs_lock);
3126 if (IS_ERR(buf))
3127 return PTR_ERR(buf);
3128 iov[0].iov_base = buf;
3129 iov[0].iov_len = (compat_size_t) len;
3130 return 0;
3131}
3132#endif
3133
3134static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3135 bool needs_lock)
3136{
3137 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3138 void __user *buf;
3139 ssize_t len;
3140
3141 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3142 return -EFAULT;
3143
3144 len = iov[0].iov_len;
3145 if (len < 0)
3146 return -EINVAL;
3147 buf = io_rw_buffer_select(req, &len, needs_lock);
3148 if (IS_ERR(buf))
3149 return PTR_ERR(buf);
3150 iov[0].iov_base = buf;
3151 iov[0].iov_len = len;
3152 return 0;
3153}
3154
3155static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3156 bool needs_lock)
3157{
Jens Axboedddb3e22020-06-04 11:27:01 -06003158 if (req->flags & REQ_F_BUFFER_SELECTED) {
3159 struct io_buffer *kbuf;
3160
3161 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3162 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3163 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003164 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003165 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00003166 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07003167 return -EINVAL;
3168
3169#ifdef CONFIG_COMPAT
3170 if (req->ctx->compat)
3171 return io_compat_import(req, iov, needs_lock);
3172#endif
3173
3174 return __io_iov_buffer_select(req, iov, needs_lock);
3175}
3176
Pavel Begunkov847595d2021-02-04 13:52:06 +00003177static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3178 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003179{
Jens Axboe9adbd452019-12-20 08:45:55 -07003180 void __user *buf = u64_to_user_ptr(req->rw.addr);
3181 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003182 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003183 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003184
Pavel Begunkov7d009162019-11-25 23:14:40 +03003185 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003186 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003187 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003188 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003189
Jens Axboebcda7ba2020-02-23 16:42:51 -07003190 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003191 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003192 return -EINVAL;
3193
Jens Axboe3a6820f2019-12-22 15:19:35 -07003194 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003195 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003196 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003197 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003198 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003199 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003200 }
3201
Jens Axboe3a6820f2019-12-22 15:19:35 -07003202 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3203 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003204 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003205 }
3206
Jens Axboe4d954c22020-02-27 07:31:19 -07003207 if (req->flags & REQ_F_BUFFER_SELECT) {
3208 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003209 if (!ret)
3210 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003211 *iovec = NULL;
3212 return ret;
3213 }
3214
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003215 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3216 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003217}
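/*
 * Summary of the sourcing paths above: fixed read/write uses a registered
 * buffer (io_import_fixed), IORING_OP_READ/WRITE take a plain address and
 * length (optionally resolved through a selected provided buffer), and all
 * other opcodes import a user iovec array, with compat handling.
 */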
3218
Jens Axboe0fef9482020-08-26 10:36:20 -06003219static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3220{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003221 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003222}
3223
Jens Axboe32960612019-09-23 11:05:34 -06003224/*
3225 * For files that don't have ->read_iter() and ->write_iter(), handle them
3226 * by looping over ->read() or ->write() manually.
3227 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003228static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003229{
Jens Axboe4017eb92020-10-22 14:14:12 -06003230 struct kiocb *kiocb = &req->rw.kiocb;
3231 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003232 ssize_t ret = 0;
3233
3234 /*
3235 * Don't support polled IO through this interface, and we can't
3236 * support non-blocking either. For the latter, this just causes
3237 * the kiocb to be handled from an async context.
3238 */
3239 if (kiocb->ki_flags & IOCB_HIPRI)
3240 return -EOPNOTSUPP;
3241 if (kiocb->ki_flags & IOCB_NOWAIT)
3242 return -EAGAIN;
3243
3244 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003245 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003246 ssize_t nr;
3247
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003248 if (!iov_iter_is_bvec(iter)) {
3249 iovec = iov_iter_iovec(iter);
3250 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003251 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3252 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003253 }
3254
Jens Axboe32960612019-09-23 11:05:34 -06003255 if (rw == READ) {
3256 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003257 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003258 } else {
3259 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003260 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003261 }
3262
3263 if (nr < 0) {
3264 if (!ret)
3265 ret = nr;
3266 break;
3267 }
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003268 if (!iov_iter_is_bvec(iter)) {
3269 iov_iter_advance(iter, nr);
3270 } else {
3271 req->rw.len -= nr;
3272 req->rw.addr += nr;
3273 }
Jens Axboe32960612019-09-23 11:05:34 -06003274 ret += nr;
3275 if (nr != iovec.iov_len)
3276 break;
Jens Axboe32960612019-09-23 11:05:34 -06003277 }
3278
3279 return ret;
3280}
3281
Jens Axboeff6165b2020-08-13 09:47:43 -06003282static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3283 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003284{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003285 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003286
Jens Axboeff6165b2020-08-13 09:47:43 -06003287 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003288 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003289 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003290 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003291 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003292 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003293 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003294 unsigned iov_off = 0;
3295
3296 rw->iter.iov = rw->fast_iov;
3297 if (iter->iov != fast_iov) {
3298 iov_off = iter->iov - fast_iov;
3299 rw->iter.iov += iov_off;
3300 }
3301 if (rw->fast_iov != fast_iov)
3302 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003303 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003304 } else {
3305 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003306 }
3307}
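/*
 * The copy above snapshots the iterator (and, for non-bvec imports, the
 * iovec array or inline fast_iov) into the request's async data so that a
 * punted or retried request can walk exactly the same ranges later.
 */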
3308
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003309static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003310{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003311 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3312 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3313 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003314}
3315
Jens Axboeff6165b2020-08-13 09:47:43 -06003316static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3317 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003318 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003319{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003320 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003321 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003322 if (!req->async_data) {
Jens Axboecd658692021-09-10 11:19:14 -06003323 struct io_async_rw *iorw;
3324
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003325 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003326 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003327 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003328 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003329
Jens Axboeff6165b2020-08-13 09:47:43 -06003330 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboecd658692021-09-10 11:19:14 -06003331 iorw = req->async_data;
3332 /* we've copied and mapped the iter, ensure state is saved */
3333 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003334 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003335 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003336}
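/*
 * Saving the iterator state here pairs with iov_iter_restore() in
 * io_resubmit_prep(): a reissued request can rewind to the state it had
 * before a failed attempt consumed part of the iterator.
 */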
3337
Pavel Begunkov73debe62020-09-30 22:57:54 +03003338static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003339{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003340 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003341 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003342 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003343
Pavel Begunkov2846c482020-11-07 13:16:27 +00003344 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003345 if (unlikely(ret < 0))
3346 return ret;
3347
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003348 iorw->bytes_done = 0;
3349 iorw->free_iovec = iov;
3350 if (iov)
3351 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboecd658692021-09-10 11:19:14 -06003352 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003353 return 0;
3354}
3355
Pavel Begunkov73debe62020-09-30 22:57:54 +03003356static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003357{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003358 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3359 return -EBADF;
Jens Axboe5d329e12021-09-14 11:08:37 -06003360 return io_prep_rw(req, sqe, READ);
Jens Axboef67676d2019-12-02 11:03:47 -07003361}
3362
Jens Axboec1dd91d2020-08-03 16:43:59 -06003363/*
3364 * This is our waitqueue callback handler, registered through lock_page_async()
3365	 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3366 * This gets called when the page is unlocked, and we generally expect that to
3367 * happen when the page IO is completed and the page is now uptodate. This will
3368 * queue a task_work based retry of the operation, attempting to copy the data
3369 * again. If the latter fails because the page was NOT uptodate, then we will
3370 * do a thread based blocking retry of the operation. That's the unexpected
3371 * slow path.
3372 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003373static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3374 int sync, void *arg)
3375{
3376 struct wait_page_queue *wpq;
3377 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003378 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003379
3380 wpq = container_of(wait, struct wait_page_queue, wait);
3381
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003382 if (!wake_page_match(wpq, key))
3383 return 0;
3384
Hao Xuc8d317a2020-09-29 20:00:45 +08003385 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003386 list_del_init(&wait->entry);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003387 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003388 return 1;
3389}
3390
Jens Axboec1dd91d2020-08-03 16:43:59 -06003391/*
3392 * This controls whether a given IO request should be armed for async page
3393 * based retry. If we return false here, the request is handed to the async
3394 * worker threads for retry. If we're doing buffered reads on a regular file,
3395 * we prepare a private wait_page_queue entry and retry the operation. This
3396 * will either succeed because the page is now uptodate and unlocked, or it
3397 * will register a callback when the page is unlocked at IO completion. Through
3398 * that callback, io_uring uses task_work to setup a retry of the operation.
3399 * That retry will attempt the buffered read again. The retry will generally
3400 * succeed, or in rare cases where it fails, we then fall back to using the
3401 * async worker threads for a blocking retry.
3402 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003403static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003404{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003405 struct io_async_rw *rw = req->async_data;
3406 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003407 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003408
3409 /* never retry for NOWAIT, we just complete with -EAGAIN */
3410 if (req->flags & REQ_F_NOWAIT)
3411 return false;
3412
Jens Axboe227c0c92020-08-13 11:51:40 -06003413 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003414 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003415 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003416
Jens Axboebcf5a062020-05-22 09:24:42 -06003417 /*
3418 * just use poll if we can, and don't attempt if the fs doesn't
 3419	 * support callback-based unlocks
3420 */
3421 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3422 return false;
3423
Jens Axboe3b2a4432020-08-16 10:58:43 -07003424 wait->wait.func = io_async_buf_func;
3425 wait->wait.private = req;
3426 wait->wait.flags = 0;
3427 INIT_LIST_HEAD(&wait->wait.entry);
3428 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003429 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003430 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003431 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003432}
3433
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003434static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003435{
3436 if (req->file->f_op->read_iter)
3437 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003438 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003439 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003440 else
3441 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003442}
3443
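/*
 * Regular files and block devices have a known size, so a short read
 * should be retried until the full range is done; sockets, pipes and
 * the like may legitimately return less than asked for.
 */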
Ming Lei7db30432021-08-21 23:07:51 +08003444static bool need_read_all(struct io_kiocb *req)
3445{
3446 return req->flags & REQ_F_ISREG ||
3447 S_ISBLK(file_inode(req->file)->i_mode);
3448}
3449
Pavel Begunkov889fca72021-02-10 00:03:09 +00003450static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003451{
3452 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003453 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003454 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003455 struct io_async_rw *rw = req->async_data;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003456 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboecd658692021-09-10 11:19:14 -06003457 struct iov_iter_state __state, *state;
3458 ssize_t ret, ret2;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003459
Pavel Begunkov2846c482020-11-07 13:16:27 +00003460 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003461 iter = &rw->iter;
Jens Axboecd658692021-09-10 11:19:14 -06003462 state = &rw->iter_state;
3463 /*
3464 * We come here from an earlier attempt, restore our state to
3465 * match in case it doesn't. It's cheap enough that we don't
3466 * need to make this conditional.
3467 */
3468 iov_iter_restore(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003469 iovec = NULL;
3470 } else {
3471 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3472 if (ret < 0)
3473 return ret;
Jens Axboecd658692021-09-10 11:19:14 -06003474 state = &__state;
3475 iov_iter_save_state(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003476 }
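	/* stash the full byte count for rw_verify_area() and short read checks */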
Jens Axboecd658692021-09-10 11:19:14 -06003477 req->result = iov_iter_count(iter);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003478
Jens Axboefd6c2e42019-12-18 12:19:41 -07003479	/* make IOCB_NOWAIT reflect whether this attempt may block */
3480 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003481 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003482 else
3483 kiocb->ki_flags |= IOCB_NOWAIT;
3484
Pavel Begunkov24c74672020-06-21 13:09:51 +03003485	/* if the file doesn't support nowait, punt to the async workers */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003486 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003487 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003488 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003489 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003490
Jens Axboecd658692021-09-10 11:19:14 -06003491 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003492 if (unlikely(ret)) {
3493 kfree(iovec);
3494 return ret;
3495 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003496
Jens Axboe227c0c92020-08-13 11:51:40 -06003497 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003498
Jens Axboe230d50d2021-04-01 20:41:15 -06003499 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003500 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003501 /* IOPOLL retry should happen for io-wq threads */
3502 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003503 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003504 /* no retry on NONBLOCK nor RWF_NOWAIT */
3505 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003506 goto done;
Jens Axboef38c7e32020-09-25 15:23:43 -06003507 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003508 } else if (ret == -EIOCBQUEUED) {
3509 goto out_free;
Jens Axboecd658692021-09-10 11:19:14 -06003510 } else if (ret <= 0 || ret == req->result || !force_nonblock ||
Ming Lei7db30432021-08-21 23:07:51 +08003511 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003512	/* read it all, failed, already did sync, or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003513 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003514 }
3515
Jens Axboecd658692021-09-10 11:19:14 -06003516 /*
3517 * Don't depend on the iter state matching what was consumed, or being
3518 * untouched in case of error. Restore it and we'll advance it
3519 * manually if we need to.
3520 */
3521 iov_iter_restore(iter, state);
3522
Jens Axboe227c0c92020-08-13 11:51:40 -06003523 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003524 if (ret2)
3525 return ret2;
3526
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003527 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003528 rw = req->async_data;
Jens Axboecd658692021-09-10 11:19:14 -06003529 /*
3530 * Now use our persistent iterator and state, if we aren't already.
3531 * We've restored and mapped the iter to match.
3532 */
3533 if (iter != &rw->iter) {
3534 iter = &rw->iter;
3535 state = &rw->iter_state;
3536 }
Jens Axboe227c0c92020-08-13 11:51:40 -06003537
Pavel Begunkovb23df912021-02-04 13:52:04 +00003538 do {
Jens Axboecd658692021-09-10 11:19:14 -06003539 /*
3540 * We end up here because of a partial read, either from
3541 * above or inside this loop. Advance the iter by the bytes
3542 * that were consumed.
3543 */
3544 iov_iter_advance(iter, ret);
3545 if (!iov_iter_count(iter))
3546 break;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003547 rw->bytes_done += ret;
Jens Axboecd658692021-09-10 11:19:14 -06003548 iov_iter_save_state(iter, state);
3549
Pavel Begunkovb23df912021-02-04 13:52:04 +00003550 /* if we can retry, do so with the callbacks armed */
3551 if (!io_rw_should_retry(req)) {
3552 kiocb->ki_flags &= ~IOCB_WAITQ;
3553 return -EAGAIN;
3554 }
3555
3556 /*
3557 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3558 * we get -EIOCBQUEUED, then we'll get a notification when the
3559 * desired page gets unlocked. We can also get a partial read
3560 * here, and if we do, then just retry at the new offset.
3561 */
3562 ret = io_iter_do_read(req, iter);
3563 if (ret == -EIOCBQUEUED)
3564 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003565 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003566 kiocb->ki_flags &= ~IOCB_WAITQ;
Jens Axboecd658692021-09-10 11:19:14 -06003567 iov_iter_restore(iter, state);
3568 } while (ret > 0);
Jens Axboe227c0c92020-08-13 11:51:40 -06003569done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003570 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003571out_free:
 3572	/* it's faster to check here than to delegate to kfree() */
3573 if (iovec)
3574 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003575 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003576}
3577
Pavel Begunkov73debe62020-09-30 22:57:54 +03003578static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003579{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003580 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3581 return -EBADF;
Jens Axboe5d329e12021-09-14 11:08:37 -06003582 return io_prep_rw(req, sqe, WRITE);
Jens Axboef67676d2019-12-02 11:03:47 -07003583}
3584
Pavel Begunkov889fca72021-02-10 00:03:09 +00003585static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003586{
3587 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003588 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003589 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003590 struct io_async_rw *rw = req->async_data;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003591 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboecd658692021-09-10 11:19:14 -06003592 struct iov_iter_state __state, *state;
3593 ssize_t ret, ret2;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003594
Pavel Begunkov2846c482020-11-07 13:16:27 +00003595 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003596 iter = &rw->iter;
Jens Axboecd658692021-09-10 11:19:14 -06003597 state = &rw->iter_state;
3598 iov_iter_restore(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003599 iovec = NULL;
3600 } else {
3601 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3602 if (ret < 0)
3603 return ret;
Jens Axboecd658692021-09-10 11:19:14 -06003604 state = &__state;
3605 iov_iter_save_state(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003606 }
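	/* stash the full byte count for rw_verify_area() below */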
Jens Axboecd658692021-09-10 11:19:14 -06003607 req->result = iov_iter_count(iter);
3608 ret2 = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003609
Jens Axboefd6c2e42019-12-18 12:19:41 -07003610	/* make IOCB_NOWAIT reflect whether this attempt may block */
3611 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003612 kiocb->ki_flags &= ~IOCB_NOWAIT;
3613 else
3614 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003615
Pavel Begunkov24c74672020-06-21 13:09:51 +03003616	/* if the file doesn't support nowait, punt to the async workers */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003617 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003618 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003619
Jens Axboe10d59342019-12-09 20:16:22 -07003620	/* file path doesn't support NOWAIT for non-direct IO */
3621 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3622 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003623 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003624
Jens Axboecd658692021-09-10 11:19:14 -06003625 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003626 if (unlikely(ret))
3627 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003628
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003629 /*
3630 * Open-code file_start_write here to grab freeze protection,
3631 * which will be released by another thread in
3632 * io_complete_rw(). Fool lockdep by telling it the lock got
3633 * released so that it doesn't complain about the held lock when
3634 * we return to userspace.
3635 */
3636 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003637 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003638 __sb_writers_release(file_inode(req->file)->i_sb,
3639 SB_FREEZE_WRITE);
3640 }
3641 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003642
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003643 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003644 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003645 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003646 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003647 else
3648 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003649
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003650 if (req->flags & REQ_F_REISSUE) {
3651 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003652 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003653 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003654
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003655 /*
3656 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3657 * retry them without IOCB_NOWAIT.
3658 */
3659 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3660 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003661 /* no retry on NONBLOCK nor RWF_NOWAIT */
3662 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003663 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003664 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003665 /* IOPOLL retry should happen for io-wq threads */
3666 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3667 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003668done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003669 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003670 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003671copy_iov:
Jens Axboecd658692021-09-10 11:19:14 -06003672 iov_iter_restore(iter, state);
3673 if (ret2 > 0)
3674 iov_iter_advance(iter, ret2);
Jens Axboe227c0c92020-08-13 11:51:40 -06003675 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003676 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003677 }
Jens Axboe31b51512019-01-18 22:56:34 -07003678out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003679 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003680 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003681 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003682 return ret;
3683}
3684
Jens Axboe80a261f2020-09-28 14:23:58 -06003685static int io_renameat_prep(struct io_kiocb *req,
3686 const struct io_uring_sqe *sqe)
3687{
3688 struct io_rename *ren = &req->rename;
3689 const char __user *oldf, *newf;
3690
Jens Axboeed7eb252021-06-23 09:04:13 -06003691 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3692 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003693 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeed7eb252021-06-23 09:04:13 -06003694 return -EINVAL;
Jens Axboe80a261f2020-09-28 14:23:58 -06003695 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3696 return -EBADF;
3697
3698 ren->old_dfd = READ_ONCE(sqe->fd);
3699 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3700 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3701 ren->new_dfd = READ_ONCE(sqe->len);
3702 ren->flags = READ_ONCE(sqe->rename_flags);
3703
3704 ren->oldpath = getname(oldf);
3705 if (IS_ERR(ren->oldpath))
3706 return PTR_ERR(ren->oldpath);
3707
3708 ren->newpath = getname(newf);
3709 if (IS_ERR(ren->newpath)) {
3710 putname(ren->oldpath);
3711 return PTR_ERR(ren->newpath);
3712 }
3713
3714 req->flags |= REQ_F_NEED_CLEANUP;
3715 return 0;
3716}
3717
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003718static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003719{
3720 struct io_rename *ren = &req->rename;
3721 int ret;
3722
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003723 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003724 return -EAGAIN;
3725
3726 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3727 ren->newpath, ren->flags);
3728
3729 req->flags &= ~REQ_F_NEED_CLEANUP;
3730 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003731 req_set_fail(req);
Jens Axboe80a261f2020-09-28 14:23:58 -06003732 io_req_complete(req, ret);
3733 return 0;
3734}
3735
Jens Axboe14a11432020-09-28 14:27:37 -06003736static int io_unlinkat_prep(struct io_kiocb *req,
3737 const struct io_uring_sqe *sqe)
3738{
3739 struct io_unlink *un = &req->unlink;
3740 const char __user *fname;
3741
Jens Axboe22634bc2021-06-23 09:07:45 -06003742 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3743 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003744 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
3745 sqe->splice_fd_in)
Jens Axboe22634bc2021-06-23 09:07:45 -06003746 return -EINVAL;
Jens Axboe14a11432020-09-28 14:27:37 -06003747 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3748 return -EBADF;
3749
3750 un->dfd = READ_ONCE(sqe->fd);
3751
3752 un->flags = READ_ONCE(sqe->unlink_flags);
3753 if (un->flags & ~AT_REMOVEDIR)
3754 return -EINVAL;
3755
3756 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3757 un->filename = getname(fname);
3758 if (IS_ERR(un->filename))
3759 return PTR_ERR(un->filename);
3760
3761 req->flags |= REQ_F_NEED_CLEANUP;
3762 return 0;
3763}
3764
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003765static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003766{
3767 struct io_unlink *un = &req->unlink;
3768 int ret;
3769
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003770 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003771 return -EAGAIN;
3772
3773 if (un->flags & AT_REMOVEDIR)
3774 ret = do_rmdir(un->dfd, un->filename);
3775 else
3776 ret = do_unlinkat(un->dfd, un->filename);
3777
3778 req->flags &= ~REQ_F_NEED_CLEANUP;
3779 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003780 req_set_fail(req);
Jens Axboe14a11432020-09-28 14:27:37 -06003781 io_req_complete(req, ret);
3782 return 0;
3783}
3784
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07003785static int io_mkdirat_prep(struct io_kiocb *req,
3786 const struct io_uring_sqe *sqe)
3787{
3788 struct io_mkdir *mkd = &req->mkdir;
3789 const char __user *fname;
3790
3791 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3792 return -EINVAL;
3793 if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
3794 sqe->splice_fd_in)
3795 return -EINVAL;
3796 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3797 return -EBADF;
3798
3799 mkd->dfd = READ_ONCE(sqe->fd);
3800 mkd->mode = READ_ONCE(sqe->len);
3801
3802 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3803 mkd->filename = getname(fname);
3804 if (IS_ERR(mkd->filename))
3805 return PTR_ERR(mkd->filename);
3806
3807 req->flags |= REQ_F_NEED_CLEANUP;
3808 return 0;
3809}
3810
3811static int io_mkdirat(struct io_kiocb *req, int issue_flags)
3812{
3813 struct io_mkdir *mkd = &req->mkdir;
3814 int ret;
3815
3816 if (issue_flags & IO_URING_F_NONBLOCK)
3817 return -EAGAIN;
3818
3819 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
3820
3821 req->flags &= ~REQ_F_NEED_CLEANUP;
3822 if (ret < 0)
3823 req_set_fail(req);
3824 io_req_complete(req, ret);
3825 return 0;
3826}
3827
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07003828static int io_symlinkat_prep(struct io_kiocb *req,
3829 const struct io_uring_sqe *sqe)
3830{
3831 struct io_symlink *sl = &req->symlink;
3832 const char __user *oldpath, *newpath;
3833
3834 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3835 return -EINVAL;
3836 if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
3837 sqe->splice_fd_in)
3838 return -EINVAL;
3839 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3840 return -EBADF;
3841
3842 sl->new_dfd = READ_ONCE(sqe->fd);
3843 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
3844 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3845
3846 sl->oldpath = getname(oldpath);
3847 if (IS_ERR(sl->oldpath))
3848 return PTR_ERR(sl->oldpath);
3849
3850 sl->newpath = getname(newpath);
3851 if (IS_ERR(sl->newpath)) {
3852 putname(sl->oldpath);
3853 return PTR_ERR(sl->newpath);
3854 }
3855
3856 req->flags |= REQ_F_NEED_CLEANUP;
3857 return 0;
3858}
3859
3860static int io_symlinkat(struct io_kiocb *req, int issue_flags)
3861{
3862 struct io_symlink *sl = &req->symlink;
3863 int ret;
3864
3865 if (issue_flags & IO_URING_F_NONBLOCK)
3866 return -EAGAIN;
3867
3868 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
3869
3870 req->flags &= ~REQ_F_NEED_CLEANUP;
3871 if (ret < 0)
3872 req_set_fail(req);
3873 io_req_complete(req, ret);
3874 return 0;
3875}
3876
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07003877static int io_linkat_prep(struct io_kiocb *req,
3878 const struct io_uring_sqe *sqe)
3879{
3880 struct io_hardlink *lnk = &req->hardlink;
3881 const char __user *oldf, *newf;
3882
3883 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3884 return -EINVAL;
3885 if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
3886 return -EINVAL;
3887 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3888 return -EBADF;
3889
3890 lnk->old_dfd = READ_ONCE(sqe->fd);
3891 lnk->new_dfd = READ_ONCE(sqe->len);
3892 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3893 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3894 lnk->flags = READ_ONCE(sqe->hardlink_flags);
3895
3896 lnk->oldpath = getname(oldf);
3897 if (IS_ERR(lnk->oldpath))
3898 return PTR_ERR(lnk->oldpath);
3899
3900 lnk->newpath = getname(newf);
3901 if (IS_ERR(lnk->newpath)) {
3902 putname(lnk->oldpath);
3903 return PTR_ERR(lnk->newpath);
3904 }
3905
3906 req->flags |= REQ_F_NEED_CLEANUP;
3907 return 0;
3908}
3909
3910static int io_linkat(struct io_kiocb *req, int issue_flags)
3911{
3912 struct io_hardlink *lnk = &req->hardlink;
3913 int ret;
3914
3915 if (issue_flags & IO_URING_F_NONBLOCK)
3916 return -EAGAIN;
3917
3918 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
3919 lnk->newpath, lnk->flags);
3920
3921 req->flags &= ~REQ_F_NEED_CLEANUP;
3922 if (ret < 0)
3923 req_set_fail(req);
3924 io_req_complete(req, ret);
3925 return 0;
3926}
3927
Jens Axboe36f4fa62020-09-05 11:14:22 -06003928static int io_shutdown_prep(struct io_kiocb *req,
3929 const struct io_uring_sqe *sqe)
3930{
3931#if defined(CONFIG_NET)
3932 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3933 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003934 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3935 sqe->buf_index || sqe->splice_fd_in))
Jens Axboe36f4fa62020-09-05 11:14:22 -06003936 return -EINVAL;
3937
3938 req->shutdown.how = READ_ONCE(sqe->len);
3939 return 0;
3940#else
3941 return -EOPNOTSUPP;
3942#endif
3943}
3944
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003945static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003946{
3947#if defined(CONFIG_NET)
3948 struct socket *sock;
3949 int ret;
3950
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003951 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003952 return -EAGAIN;
3953
Linus Torvalds48aba792020-12-16 12:44:05 -08003954 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003955 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003956 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003957
3958 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003959 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003960 req_set_fail(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003961 io_req_complete(req, ret);
3962 return 0;
3963#else
3964 return -EOPNOTSUPP;
3965#endif
3966}
3967
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003968static int __io_splice_prep(struct io_kiocb *req,
3969 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003970{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01003971 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003972 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003973
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003974 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3975 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003976
3977 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003978 sp->len = READ_ONCE(sqe->len);
3979 sp->flags = READ_ONCE(sqe->splice_flags);
3980
3981 if (unlikely(sp->flags & ~valid_flags))
3982 return -EINVAL;
3983
Pavel Begunkov62906e82021-08-10 14:52:47 +01003984 sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003985 (sp->flags & SPLICE_F_FD_IN_FIXED));
3986 if (!sp->file_in)
3987 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003988 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003989 return 0;
3990}
3991
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003992static int io_tee_prep(struct io_kiocb *req,
3993 const struct io_uring_sqe *sqe)
3994{
3995 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3996 return -EINVAL;
3997 return __io_splice_prep(req, sqe);
3998}
3999
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004000static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004001{
4002 struct io_splice *sp = &req->splice;
4003 struct file *in = sp->file_in;
4004 struct file *out = sp->file_out;
4005 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4006 long ret = 0;
4007
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004008 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004009 return -EAGAIN;
4010 if (sp->len)
4011 ret = do_tee(in, out, sp->len, flags);
4012
Pavel Begunkove1d767f2021-03-19 17:22:43 +00004013 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4014 io_put_file(in);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004015 req->flags &= ~REQ_F_NEED_CLEANUP;
4016
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004017 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004018 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004019 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004020 return 0;
4021}
4022
4023static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4024{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01004025 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004026
4027 sp->off_in = READ_ONCE(sqe->splice_off_in);
4028 sp->off_out = READ_ONCE(sqe->off);
4029 return __io_splice_prep(req, sqe);
4030}
4031
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004032static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004033{
4034 struct io_splice *sp = &req->splice;
4035 struct file *in = sp->file_in;
4036 struct file *out = sp->file_out;
4037 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4038 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03004039 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004040
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004041 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03004042 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004043
4044 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
4045 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03004046
Jens Axboe948a7742020-05-17 14:21:38 -06004047 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03004048 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004049
Pavel Begunkove1d767f2021-03-19 17:22:43 +00004050 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4051 io_put_file(in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004052 req->flags &= ~REQ_F_NEED_CLEANUP;
4053
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004054 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004055 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004056 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004057 return 0;
4058}
4059
Jens Axboe2b188cc2019-01-07 10:46:33 -07004060/*
4061 * IORING_OP_NOP just posts a completion event, nothing else.
4062 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00004063static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004064{
4065 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004066
Jens Axboedef596e2019-01-09 08:59:42 -07004067 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4068 return -EINVAL;
4069
Pavel Begunkov889fca72021-02-10 00:03:09 +00004070 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004071 return 0;
4072}
4073
Pavel Begunkov1155c762021-02-18 18:29:38 +00004074static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004075{
Jens Axboe6b063142019-01-10 22:13:58 -07004076 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004077
Jens Axboe09bb8392019-03-13 12:39:28 -06004078 if (!req->file)
4079 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004080
Jens Axboe6b063142019-01-10 22:13:58 -07004081 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07004082 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004083 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4084 sqe->splice_fd_in))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004085 return -EINVAL;
4086
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004087 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4088 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4089 return -EINVAL;
4090
4091 req->sync.off = READ_ONCE(sqe->off);
4092 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004093 return 0;
4094}
4095
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004096static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07004097{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004098 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004099 int ret;
4100
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004101 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004102 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004103 return -EAGAIN;
4104
Jens Axboe9adbd452019-12-20 08:45:55 -07004105 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004106 end > 0 ? end : LLONG_MAX,
4107 req->sync.flags & IORING_FSYNC_DATASYNC);
4108 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004109 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004110 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004111 return 0;
4112}
4113
Jens Axboed63d1b52019-12-10 10:38:56 -07004114static int io_fallocate_prep(struct io_kiocb *req,
4115 const struct io_uring_sqe *sqe)
4116{
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004117 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4118 sqe->splice_fd_in)
Jens Axboed63d1b52019-12-10 10:38:56 -07004119 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004120 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4121 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07004122
4123 req->sync.off = READ_ONCE(sqe->off);
4124 req->sync.len = READ_ONCE(sqe->addr);
4125 req->sync.mode = READ_ONCE(sqe->len);
4126 return 0;
4127}
4128
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004129static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07004130{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004131 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07004132
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004133	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004134 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004135 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004136 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4137 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004138 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004139 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004140 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07004141 return 0;
4142}
4143
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004144static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004145{
Jens Axboef8748882020-01-08 17:47:02 -07004146 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004147 int ret;
4148
Pavel Begunkovd3fddf62021-08-09 13:04:16 +01004149 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4150 return -EINVAL;
Pavel Begunkovb9445592021-08-25 12:25:45 +01004151 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07004152 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004153 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07004154 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004155
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004156	/* open.how should already be initialised */
4157 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06004158 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004159
Pavel Begunkov25e72d12020-06-03 18:03:23 +03004160 req->open.dfd = READ_ONCE(sqe->fd);
4161 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07004162 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004163 if (IS_ERR(req->open.filename)) {
4164 ret = PTR_ERR(req->open.filename);
4165 req->open.filename = NULL;
4166 return ret;
4167 }
Pavel Begunkovb9445592021-08-25 12:25:45 +01004168
4169 req->open.file_slot = READ_ONCE(sqe->file_index);
4170 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4171 return -EINVAL;
4172
Jens Axboe4022e7a2020-03-19 19:23:18 -06004173 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004174 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004175 return 0;
4176}
4177
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004178static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4179{
Pavel Begunkovd3fddf62021-08-09 13:04:16 +01004180 u64 mode = READ_ONCE(sqe->len);
4181 u64 flags = READ_ONCE(sqe->open_flags);
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004182
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004183 req->open.how = build_open_how(flags, mode);
4184 return __io_openat_prep(req, sqe);
4185}
4186
Jens Axboecebdb982020-01-08 17:59:24 -07004187static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4188{
4189 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07004190 size_t len;
4191 int ret;
4192
Jens Axboecebdb982020-01-08 17:59:24 -07004193 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4194 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07004195 if (len < OPEN_HOW_SIZE_VER0)
4196 return -EINVAL;
4197
4198 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4199 len);
4200 if (ret)
4201 return ret;
4202
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004203 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07004204}
4205
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004206static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004207{
4208 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004209 struct file *file;
Pavel Begunkovb9445592021-08-25 12:25:45 +01004210 bool resolve_nonblock, nonblock_set;
4211 bool fixed = !!req->open.file_slot;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004212 int ret;
4213
Jens Axboecebdb982020-01-08 17:59:24 -07004214 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004215 if (ret)
4216 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004217 nonblock_set = op.open_flag & O_NONBLOCK;
4218 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004219 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004220 /*
4221 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
 4222	 * it'll always return -EAGAIN
4223 */
4224 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4225 return -EAGAIN;
4226 op.lookup_flags |= LOOKUP_CACHED;
4227 op.open_flag |= O_NONBLOCK;
4228 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004229
Pavel Begunkovb9445592021-08-25 12:25:45 +01004230 if (!fixed) {
4231 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4232 if (ret < 0)
4233 goto err;
4234 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004235
4236 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004237 if (IS_ERR(file)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004238 /*
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004239		 * We could hang on to this 'fd' on retrying, but it seems like
4240 * marginal gain for something that is now known to be a slower
4241 * path. So just put it, and we'll get a new one when we retry.
Jens Axboe3a81fd02020-12-10 12:25:36 -07004242 */
Pavel Begunkovb9445592021-08-25 12:25:45 +01004243 if (!fixed)
4244 put_unused_fd(ret);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004245
4246 ret = PTR_ERR(file);
4247 /* only retry if RESOLVE_CACHED wasn't already set by application */
4248 if (ret == -EAGAIN &&
4249 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
4250 return -EAGAIN;
4251 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004252 }
4253
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004254 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4255 file->f_flags &= ~O_NONBLOCK;
4256 fsnotify_open(file);
Pavel Begunkovb9445592021-08-25 12:25:45 +01004257
4258 if (!fixed)
4259 fd_install(ret, file);
4260 else
4261 ret = io_install_fixed_file(req, file, issue_flags,
4262 req->open.file_slot - 1);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004263err:
4264 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004265 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004266 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004267 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004268 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004269 return 0;
4270}
4271
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004272static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07004273{
Pavel Begunkove45cff52021-02-28 22:35:14 +00004274 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07004275}
4276
Jens Axboe067524e2020-03-02 16:32:28 -07004277static int io_remove_buffers_prep(struct io_kiocb *req,
4278 const struct io_uring_sqe *sqe)
4279{
4280 struct io_provide_buf *p = &req->pbuf;
4281 u64 tmp;
4282
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004283 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4284 sqe->splice_fd_in)
Jens Axboe067524e2020-03-02 16:32:28 -07004285 return -EINVAL;
4286
4287 tmp = READ_ONCE(sqe->fd);
4288 if (!tmp || tmp > USHRT_MAX)
4289 return -EINVAL;
4290
4291 memset(p, 0, sizeof(*p));
4292 p->nbufs = tmp;
4293 p->bgid = READ_ONCE(sqe->buf_group);
4294 return 0;
4295}
4296
4297static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4298 int bgid, unsigned nbufs)
4299{
4300 unsigned i = 0;
4301
4302 /* shouldn't happen */
4303 if (!nbufs)
4304 return 0;
4305
4306 /* the head kbuf is the list itself */
4307 while (!list_empty(&buf->list)) {
4308 struct io_buffer *nxt;
4309
4310 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4311 list_del(&nxt->list);
4312 kfree(nxt);
4313 if (++i == nbufs)
4314 return i;
4315 }
4316 i++;
4317 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004318 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07004319
4320 return i;
4321}
4322
Pavel Begunkov889fca72021-02-10 00:03:09 +00004323static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07004324{
4325 struct io_provide_buf *p = &req->pbuf;
4326 struct io_ring_ctx *ctx = req->ctx;
4327 struct io_buffer *head;
4328 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004329 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07004330
4331 io_ring_submit_lock(ctx, !force_nonblock);
4332
4333 lockdep_assert_held(&ctx->uring_lock);
4334
4335 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004336 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07004337 if (head)
4338 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07004339 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004340 req_set_fail(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004341
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004342 /* complete before unlock, IOPOLL may need the lock */
4343 __io_req_complete(req, issue_flags, ret, 0);
4344 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07004345 return 0;
4346}
4347
Jens Axboeddf0322d2020-02-23 16:41:33 -07004348static int io_provide_buffers_prep(struct io_kiocb *req,
4349 const struct io_uring_sqe *sqe)
4350{
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004351 unsigned long size, tmp_check;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004352 struct io_provide_buf *p = &req->pbuf;
4353 u64 tmp;
4354
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004355 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004356 return -EINVAL;
4357
4358 tmp = READ_ONCE(sqe->fd);
4359 if (!tmp || tmp > USHRT_MAX)
4360 return -E2BIG;
4361 p->nbufs = tmp;
4362 p->addr = READ_ONCE(sqe->addr);
4363 p->len = READ_ONCE(sqe->len);
4364
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004365 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4366 &size))
4367 return -EOVERFLOW;
4368 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4369 return -EOVERFLOW;
4370
Pavel Begunkovd81269f2021-03-19 10:21:19 +00004371 size = (unsigned long)p->len * p->nbufs;
4372 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07004373 return -EFAULT;
4374
4375 p->bgid = READ_ONCE(sqe->buf_group);
4376 tmp = READ_ONCE(sqe->off);
4377 if (tmp > USHRT_MAX)
4378 return -E2BIG;
4379 p->bid = tmp;
4380 return 0;
4381}
4382
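/*
 * Allocate and link up to pbuf->nbufs buffers into one list, handing
 * out buffer IDs sequentially from pbuf->bid; stops early if an
 * allocation fails.
 */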
4383static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4384{
4385 struct io_buffer *buf;
4386 u64 addr = pbuf->addr;
4387 int i, bid = pbuf->bid;
4388
4389 for (i = 0; i < pbuf->nbufs; i++) {
4390 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4391 if (!buf)
4392 break;
4393
4394 buf->addr = addr;
Thadeu Lima de Souza Cascardod1f82802021-05-05 09:47:06 -03004395 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004396 buf->bid = bid;
4397 addr += pbuf->len;
4398 bid++;
4399 if (!*head) {
4400 INIT_LIST_HEAD(&buf->list);
4401 *head = buf;
4402 } else {
4403 list_add_tail(&buf->list, &(*head)->list);
4404 }
4405 }
4406
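	/* partial success counts: report how many buffers made it in */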
4407 return i ? i : -ENOMEM;
4408}
4409
Pavel Begunkov889fca72021-02-10 00:03:09 +00004410static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004411{
4412 struct io_provide_buf *p = &req->pbuf;
4413 struct io_ring_ctx *ctx = req->ctx;
4414 struct io_buffer *head, *list;
4415 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004416 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004417
4418 io_ring_submit_lock(ctx, !force_nonblock);
4419
4420 lockdep_assert_held(&ctx->uring_lock);
4421
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004422 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004423
4424 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004425 if (ret >= 0 && !list) {
4426 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4427 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004428 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004429 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004430 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004431 req_set_fail(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004432 /* complete before unlock, IOPOLL may need the lock */
4433 __io_req_complete(req, issue_flags, ret, 0);
4434 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004435 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004436}
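/*
 * Purely illustrative sketch (not from this file): with liburing, an
 * application would typically publish a buffer group like so, assuming
 * liburing's io_uring_prep_provide_buffers() helper:
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, buf_len, nbufs, bgid, 0);
 *
 * That maps onto the sqe fields consumed by io_provide_buffers_prep()
 * above: fd = nbufs, addr = bufs, len = buf_len, buf_group = bgid and
 * off = first buffer ID.
 */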
4437
Jens Axboe3e4827b2020-01-08 15:18:09 -07004438static int io_epoll_ctl_prep(struct io_kiocb *req,
4439 const struct io_uring_sqe *sqe)
4440{
4441#if defined(CONFIG_EPOLL)
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004442 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004443 return -EINVAL;
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004444 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004445 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004446
4447 req->epoll.epfd = READ_ONCE(sqe->fd);
4448 req->epoll.op = READ_ONCE(sqe->len);
4449 req->epoll.fd = READ_ONCE(sqe->off);
4450
4451 if (ep_op_has_event(req->epoll.op)) {
4452 struct epoll_event __user *ev;
4453
4454 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4455 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4456 return -EFAULT;
4457 }
4458
4459 return 0;
4460#else
4461 return -EOPNOTSUPP;
4462#endif
4463}
4464
Pavel Begunkov889fca72021-02-10 00:03:09 +00004465static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004466{
4467#if defined(CONFIG_EPOLL)
4468 struct io_epoll *ie = &req->epoll;
4469 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004470 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004471
4472 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4473 if (force_nonblock && ret == -EAGAIN)
4474 return -EAGAIN;
4475
4476 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004477 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004478 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004479 return 0;
4480#else
4481 return -EOPNOTSUPP;
4482#endif
4483}
4484
Jens Axboec1ca7572019-12-25 22:18:28 -07004485static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4486{
4487#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004488 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
Jens Axboec1ca7572019-12-25 22:18:28 -07004489 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004490 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4491 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004492
4493 req->madvise.addr = READ_ONCE(sqe->addr);
4494 req->madvise.len = READ_ONCE(sqe->len);
4495 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4496 return 0;
4497#else
4498 return -EOPNOTSUPP;
4499#endif
4500}
4501
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004502static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004503{
4504#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4505 struct io_madvise *ma = &req->madvise;
4506 int ret;
4507
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004508 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004509 return -EAGAIN;
4510
Minchan Kim0726b012020-10-17 16:14:50 -07004511 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004512 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004513 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004514 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004515 return 0;
4516#else
4517 return -EOPNOTSUPP;
4518#endif
4519}
4520
Jens Axboe4840e412019-12-25 22:03:45 -07004521static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4522{
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004523 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
Jens Axboe4840e412019-12-25 22:03:45 -07004524 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004525 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4526 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004527
4528 req->fadvise.offset = READ_ONCE(sqe->off);
4529 req->fadvise.len = READ_ONCE(sqe->len);
4530 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4531 return 0;
4532}
4533
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004534static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004535{
4536 struct io_fadvise *fa = &req->fadvise;
4537 int ret;
4538
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004539 if (issue_flags & IO_URING_F_NONBLOCK) {
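		/*
		 * these advice types only tweak readahead state, so they
		 * are safe to run inline; anything else may block on IO
		 */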
Jens Axboe3e694262020-02-01 09:22:49 -07004540 switch (fa->advice) {
4541 case POSIX_FADV_NORMAL:
4542 case POSIX_FADV_RANDOM:
4543 case POSIX_FADV_SEQUENTIAL:
4544 break;
4545 default:
4546 return -EAGAIN;
4547 }
4548 }
Jens Axboe4840e412019-12-25 22:03:45 -07004549
4550 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4551 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004552 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004553 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004554 return 0;
4555}
4556
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004557static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4558{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004559 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004560 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004561 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004562 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004563 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004564 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004565
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004566 req->statx.dfd = READ_ONCE(sqe->fd);
4567 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004568 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004569 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4570 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004571
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004572 return 0;
4573}
4574
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004575static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004576{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004577 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004578 int ret;
4579
Pavel Begunkov59d70012021-03-22 01:58:30 +00004580 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004581 return -EAGAIN;
4582
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004583 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4584 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004585
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004586 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004587 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004588 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004589 return 0;
4590}
4591
Jens Axboeb5dba592019-12-11 14:02:38 -07004592static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4593{
Jens Axboe14587a462020-09-05 11:36:08 -06004594 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004595 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004596 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004597 sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeb5dba592019-12-11 14:02:38 -07004598 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004599 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004600 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004601
4602 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004603 return 0;
4604}
4605
Pavel Begunkov889fca72021-02-10 00:03:09 +00004606static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004607{
Jens Axboe9eac1902021-01-19 15:50:37 -07004608 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004609 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004610 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004611 struct file *file = NULL;
4612 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004613
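	/*
	 * Look the file up under file_lock so we can reject io_uring fds
	 * and punt files with a ->flush() method before committing to
	 * the close.
	 */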
Jens Axboe9eac1902021-01-19 15:50:37 -07004614 spin_lock(&files->file_lock);
4615 fdt = files_fdtable(files);
4616 if (close->fd >= fdt->max_fds) {
4617 spin_unlock(&files->file_lock);
4618 goto err;
4619 }
4620 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004621 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004622 spin_unlock(&files->file_lock);
4623 file = NULL;
4624 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004625 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004626
4627 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004628 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004629 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004630 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004631 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004632
Jens Axboe9eac1902021-01-19 15:50:37 -07004633 ret = __close_fd_get_file(close->fd, &file);
4634 spin_unlock(&files->file_lock);
4635 if (ret < 0) {
4636 if (ret == -ENOENT)
4637 ret = -EBADF;
4638 goto err;
4639 }
4640
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004641 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004642 ret = filp_close(file, current->files);
4643err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004644 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004645 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004646 if (file)
4647 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004648 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004649 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004650}
4651
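/*
 * Roughly what io_close() does above: the fd is peeked under file_lock
 * first so we can refuse to close an io_uring file (closing a ring fd
 * from within a ring is prone to deadlock) and so a file with a blocking
 * ->flush() gets punted to io-wq instead of stalling the nonblocking
 * issue path. __close_fd_get_file() then detaches the fd from the table
 * but hands the struct file back, so the final flush + fput happen here
 * under our control. With liburing this is simply
 * io_uring_prep_close(sqe, fd).
 */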
Pavel Begunkov1155c762021-02-18 18:29:38 +00004652static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004653{
4654 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004655
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004656 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4657 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004658 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4659 sqe->splice_fd_in))
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004660 return -EINVAL;
4661
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004662 req->sync.off = READ_ONCE(sqe->off);
4663 req->sync.len = READ_ONCE(sqe->len);
4664 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004665 return 0;
4666}
4667
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004668static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004669{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004670 int ret;
4671
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004672 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004673 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004674 return -EAGAIN;
4675
Jens Axboe9adbd452019-12-20 08:45:55 -07004676 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004677 req->sync.flags);
4678 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004679 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004680 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004681 return 0;
4682}
4683
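/*
 * sync_file_range(2) has no nonblocking mode, hence the unconditional
 * -EAGAIN punt in io_sync_file_range(). A hedged liburing sketch of the
 * equivalent submission (fd/len/offset are placeholders):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sync_file_range(sqe, fd, len, offset,
 *				      SYNC_FILE_RANGE_WRITE);
 *	io_uring_submit(&ring);
 */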
YueHaibing469956e2020-03-04 15:53:52 +08004684#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004685static int io_setup_async_msg(struct io_kiocb *req,
4686 struct io_async_msghdr *kmsg)
4687{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004688 struct io_async_msghdr *async_msg = req->async_data;
4689
4690 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004691 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004692 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004693 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004694 return -ENOMEM;
4695 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004696 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004697 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004698 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004699 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004700	/* if we were using fast_iov, set it to the new one */
4701 if (!async_msg->free_iov)
4702 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4703
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004704 return -EAGAIN;
4705}
4706
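/*
 * Note that io_setup_async_msg() returns -EAGAIN on both of its success
 * paths on purpose: the caller propagates it, the request gets punted to
 * io-wq, and by then the msghdr state has been copied from the on-stack
 * struct into req->async_data where it survives the punt. Only -ENOMEM
 * is a real failure.
 */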
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004707static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4708 struct io_async_msghdr *iomsg)
4709{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004710 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004711 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004712 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004713 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004714}
4715
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004716static int io_sendmsg_prep_async(struct io_kiocb *req)
4717{
4718 int ret;
4719
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004720 ret = io_sendmsg_copy_hdr(req, req->async_data);
4721 if (!ret)
4722 req->flags |= REQ_F_NEED_CLEANUP;
4723 return ret;
4724}
4725
Jens Axboe3529d8c2019-12-19 18:24:38 -07004726static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004727{
Jens Axboee47293f2019-12-20 08:58:21 -07004728 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004729
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004730 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4731 return -EINVAL;
4732
Pavel Begunkov270a5942020-07-12 20:41:04 +03004733 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004734 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004735 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4736 if (sr->msg_flags & MSG_DONTWAIT)
4737 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004738
Jens Axboed8768362020-02-27 14:17:49 -07004739#ifdef CONFIG_COMPAT
4740 if (req->ctx->compat)
4741 sr->msg_flags |= MSG_CMSG_COMPAT;
4742#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004743 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004744}
4745
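/*
 * Prep mapping for send/sendmsg: sqe->addr carries the user msghdr (or
 * buffer) pointer and sqe->msg_flags the send flags. MSG_NOSIGNAL is
 * always or'ed in, since SIGPIPE delivery is of no use to an async
 * submitter, and an explicit MSG_DONTWAIT flags the request REQ_F_NOWAIT
 * so a would-block result completes as -EAGAIN rather than being retried
 * from io-wq.
 */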
Pavel Begunkov889fca72021-02-10 00:03:09 +00004746static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004747{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004748 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004749 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004750 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004751 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004752 int ret;
4753
Florent Revestdba4a922020-12-04 12:36:04 +01004754 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004755 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004756 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004757
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004758 kmsg = req->async_data;
4759 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004760 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004761 if (ret)
4762 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004763 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004764 }
4765
Pavel Begunkov04411802021-04-01 15:44:00 +01004766 flags = req->sr_msg.msg_flags;
4767 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004768 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004769 if (flags & MSG_WAITALL)
4770 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4771
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004772 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004773 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004774 return io_setup_async_msg(req, kmsg);
4775 if (ret == -ERESTARTSYS)
4776 ret = -EINTR;
4777
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004778 /* fast path, check for non-NULL to avoid function call */
4779 if (kmsg->free_iov)
4780 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004781 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004782 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004783 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004784 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004785 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004786}
4787
Pavel Begunkov889fca72021-02-10 00:03:09 +00004788static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004789{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004790 struct io_sr_msg *sr = &req->sr_msg;
4791 struct msghdr msg;
4792 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004793 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004794 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004795 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004796 int ret;
4797
Florent Revestdba4a922020-12-04 12:36:04 +01004798 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004799 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004800 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004801
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004802 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4803 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004804 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004805
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004806 msg.msg_name = NULL;
4807 msg.msg_control = NULL;
4808 msg.msg_controllen = 0;
4809 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004810
Pavel Begunkov04411802021-04-01 15:44:00 +01004811 flags = req->sr_msg.msg_flags;
4812 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004813 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004814 if (flags & MSG_WAITALL)
4815 min_ret = iov_iter_count(&msg.msg_iter);
4816
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004817 msg.msg_flags = flags;
4818 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004819 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004820 return -EAGAIN;
4821 if (ret == -ERESTARTSYS)
4822 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004823
Stefan Metzmacher00312752021-03-20 20:33:36 +01004824 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004825 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004826 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004827 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004828}
4829
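/*
 * io_send() is the msghdr-less fast path: the single user buffer is
 * cheaply re-imported on every attempt, so unlike io_sendmsg() there is
 * no async_data state to preserve and a bare -EAGAIN suffices for the
 * punt. A hedged liburing sketch (placeholder names):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_send(sqe, sockfd, buf, buf_len, 0);
 *	io_uring_submit(&ring);
 */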
Pavel Begunkov1400e692020-07-12 20:41:05 +03004830static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4831 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004832{
4833 struct io_sr_msg *sr = &req->sr_msg;
4834 struct iovec __user *uiov;
4835 size_t iov_len;
4836 int ret;
4837
Pavel Begunkov1400e692020-07-12 20:41:05 +03004838 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4839 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004840 if (ret)
4841 return ret;
4842
4843 if (req->flags & REQ_F_BUFFER_SELECT) {
4844 if (iov_len > 1)
4845 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004846 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004847 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004848 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004849 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004850 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004851 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004852 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004853 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004854 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004855 if (ret > 0)
4856 ret = 0;
4857 }
4858
4859 return ret;
4860}
4861
4862#ifdef CONFIG_COMPAT
4863static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004864 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004865{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004866 struct io_sr_msg *sr = &req->sr_msg;
4867 struct compat_iovec __user *uiov;
4868 compat_uptr_t ptr;
4869 compat_size_t len;
4870 int ret;
4871
Pavel Begunkov4af34172021-04-11 01:46:30 +01004872 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4873 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004874 if (ret)
4875 return ret;
4876
4877 uiov = compat_ptr(ptr);
4878 if (req->flags & REQ_F_BUFFER_SELECT) {
4879 compat_ssize_t clen;
4880
4881 if (len > 1)
4882 return -EINVAL;
4883 if (!access_ok(uiov, sizeof(*uiov)))
4884 return -EFAULT;
4885 if (__get_user(clen, &uiov->iov_len))
4886 return -EFAULT;
4887 if (clen < 0)
4888 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004889 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004890 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004891 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004892 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004893 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004894 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004895 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004896 if (ret < 0)
4897 return ret;
4898 }
4899
4900 return 0;
4901}
Jens Axboe03b12302019-12-02 18:50:25 -07004902#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004903
Pavel Begunkov1400e692020-07-12 20:41:05 +03004904static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4905 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004906{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004907 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004908
4909#ifdef CONFIG_COMPAT
4910 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004911 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004912#endif
4913
Pavel Begunkov1400e692020-07-12 20:41:05 +03004914 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004915}
4916
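/*
 * Both copy_hdr flavours enforce the same IOSQE_BUFFER_SELECT rule: the
 * user msghdr may name at most one iovec, because a selected kernel
 * buffer later replaces it wholesale and there would be no sane way to
 * split one buffer across several entries. The compat variant differs
 * only in pointer and size widths.
 */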
Jens Axboebcda7ba2020-02-23 16:42:51 -07004917static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004918 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004919{
4920 struct io_sr_msg *sr = &req->sr_msg;
4921 struct io_buffer *kbuf;
4922
Jens Axboebcda7ba2020-02-23 16:42:51 -07004923 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4924 if (IS_ERR(kbuf))
4925 return kbuf;
4926
4927 sr->kbuf = kbuf;
4928 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004929 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004930}
4931
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004932static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4933{
4934 return io_put_kbuf(req, req->sr_msg.kbuf);
4935}
4936
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004937static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004938{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004939 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004940
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004941 ret = io_recvmsg_copy_hdr(req, req->async_data);
4942 if (!ret)
4943 req->flags |= REQ_F_NEED_CLEANUP;
4944 return ret;
4945}
4946
4947static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4948{
4949 struct io_sr_msg *sr = &req->sr_msg;
4950
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004951 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4952 return -EINVAL;
4953
Pavel Begunkov270a5942020-07-12 20:41:04 +03004954 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004955 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004956 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01004957 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4958 if (sr->msg_flags & MSG_DONTWAIT)
4959 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004960
Jens Axboed8768362020-02-27 14:17:49 -07004961#ifdef CONFIG_COMPAT
4962 if (req->ctx->compat)
4963 sr->msg_flags |= MSG_CMSG_COMPAT;
4964#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004965 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004966}
4967
Pavel Begunkov889fca72021-02-10 00:03:09 +00004968static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004969{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004970 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004971 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004972 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004973 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004974 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004975 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004976 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004977
Florent Revestdba4a922020-12-04 12:36:04 +01004978 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004979 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004980 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004981
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004982 kmsg = req->async_data;
4983 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004984 ret = io_recvmsg_copy_hdr(req, &iomsg);
4985 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004986 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004987 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004988 }
4989
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004990 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004991 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004992 if (IS_ERR(kbuf))
4993 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004994 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004995 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4996 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004997 1, req->sr_msg.len);
4998 }
4999
Pavel Begunkov04411802021-04-01 15:44:00 +01005000 flags = req->sr_msg.msg_flags;
5001 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005002 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005003 if (flags & MSG_WAITALL)
5004 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5005
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005006 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5007 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005008 if (force_nonblock && ret == -EAGAIN)
5009 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005010 if (ret == -ERESTARTSYS)
5011 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005012
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005013 if (req->flags & REQ_F_BUFFER_SELECTED)
5014 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005015 /* fast path, check for non-NULL to avoid function call */
5016 if (kmsg->free_iov)
5017 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005018 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005019 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005020 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005021 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06005022 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005023}
5024
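/*
 * Completion detail worth noting: when a provided buffer was consumed,
 * io_put_recv_kbuf() folds the buffer ID into the CQE flags
 * (IORING_CQE_F_BUFFER plus the ID shifted by IORING_CQE_BUFFER_SHIFT),
 * which is how userspace learns which buffer of the group the data
 * landed in. A MSG_WAITALL receive that got truncated also trips
 * req_set_fail() even with a positive res, severing any linked requests.
 */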
Pavel Begunkov889fca72021-02-10 00:03:09 +00005025static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07005026{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005027 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005028 struct io_sr_msg *sr = &req->sr_msg;
5029 struct msghdr msg;
5030 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07005031 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005032 struct iovec iov;
5033 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005034 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005035 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005036 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005037
Florent Revestdba4a922020-12-04 12:36:04 +01005038 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005039 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005040 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005041
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005042 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005043 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005044 if (IS_ERR(kbuf))
5045 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005046 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07005047 }
5048
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005049 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005050 if (unlikely(ret))
5051 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07005052
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005053 msg.msg_name = NULL;
5054 msg.msg_control = NULL;
5055 msg.msg_controllen = 0;
5056 msg.msg_namelen = 0;
5057 msg.msg_iocb = NULL;
5058 msg.msg_flags = 0;
5059
Pavel Begunkov04411802021-04-01 15:44:00 +01005060 flags = req->sr_msg.msg_flags;
5061 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005062 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005063 if (flags & MSG_WAITALL)
5064 min_ret = iov_iter_count(&msg.msg_iter);
5065
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005066 ret = sock_recvmsg(sock, &msg, flags);
5067 if (force_nonblock && ret == -EAGAIN)
5068 return -EAGAIN;
5069 if (ret == -ERESTARTSYS)
5070 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005071out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005072 if (req->flags & REQ_F_BUFFER_SELECTED)
5073 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01005074 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005075 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005076 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07005077 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07005078}
5079
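/*
 * A hedged liburing sketch of the provided-buffers path that the
 * REQ_F_BUFFER_SELECT branches above serve (group 0 assumed to have been
 * populated via io_uring_prep_provide_buffers() beforehand):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 0;
 *	io_uring_submit(&ring);
 */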
Jens Axboe3529d8c2019-12-19 18:24:38 -07005080static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005081{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005082 struct io_accept *accept = &req->accept;
5083
Jens Axboe14587a462020-09-05 11:36:08 -06005084 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06005085 return -EINVAL;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005086 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005087 return -EINVAL;
5088
Jens Axboed55e5f52019-12-11 16:12:15 -07005089 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5090 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005091 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06005092 accept->nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005093
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005094 accept->file_slot = READ_ONCE(sqe->file_index);
5095	/* close-on-exec has no meaning for a fixed-file slot */
5096	if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
5097 return -EINVAL;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005098 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5099 return -EINVAL;
5100 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5101 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005102 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005103}
Jens Axboe17f2fe32019-10-17 14:42:58 -06005104
Pavel Begunkov889fca72021-02-10 00:03:09 +00005105static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005106{
5107 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005108 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005109 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005110 bool fixed = !!accept->file_slot;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005111 struct file *file;
5112 int ret, fd;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005113
Jiufei Xuee697dee2020-06-10 13:41:59 +08005114 if (req->file->f_flags & O_NONBLOCK)
5115 req->flags |= REQ_F_NOWAIT;
5116
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005117 if (!fixed) {
5118 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5119 if (unlikely(fd < 0))
5120 return fd;
5121 }
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005122 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5123 accept->flags);
5124 if (IS_ERR(file)) {
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005125 if (!fixed)
5126 put_unused_fd(fd);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005127 ret = PTR_ERR(file);
5128 if (ret == -EAGAIN && force_nonblock)
5129 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005130 if (ret == -ERESTARTSYS)
5131 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005132 req_set_fail(req);
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005133 } else if (!fixed) {
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005134 fd_install(fd, file);
5135 ret = fd;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005136 } else {
5137 ret = io_install_fixed_file(req, file, issue_flags,
5138 accept->file_slot - 1);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005139 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00005140 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06005141 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005142}
5143
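/*
 * The fixed-file ("direct") accept variant hangs off sqe->file_index: a
 * nonzero value means "install the accepted file into slot
 * file_index - 1 of the registered file table" instead of allocating a
 * regular fd. The +1 encoding keeps 0 meaning "no fixed slot" while
 * still making slot 0 addressable. A hedged raw-SQE sketch (newer
 * liburing also wraps this as io_uring_prep_accept_direct()):
 *
 *	io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
 *	sqe->file_index = slot + 1;
 */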
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005144static int io_connect_prep_async(struct io_kiocb *req)
5145{
5146 struct io_async_connect *io = req->async_data;
5147 struct io_connect *conn = &req->connect;
5148
5149 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5150}
5151
Jens Axboe3529d8c2019-12-19 18:24:38 -07005152static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07005153{
Jens Axboe3529d8c2019-12-19 18:24:38 -07005154 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07005155
Jens Axboe14587a462020-09-05 11:36:08 -06005156 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005157 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005158 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5159 sqe->splice_fd_in)
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005160 return -EINVAL;
5161
Jens Axboe3529d8c2019-12-19 18:24:38 -07005162 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5163 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005164 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07005165}
5166
Pavel Begunkov889fca72021-02-10 00:03:09 +00005167static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07005168{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005169 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005170 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005171 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005172 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005173
Jens Axboee8c2bc12020-08-15 18:44:09 -07005174 if (req->async_data) {
5175 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07005176 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07005177 ret = move_addr_to_kernel(req->connect.addr,
5178 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07005179 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07005180 if (ret)
5181 goto out;
5182 io = &__io;
5183 }
5184
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005185 file_flags = force_nonblock ? O_NONBLOCK : 0;
5186
Jens Axboee8c2bc12020-08-15 18:44:09 -07005187 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005188 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07005189 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07005190 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07005191 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005192 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07005193 ret = -ENOMEM;
5194 goto out;
5195 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07005196 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07005197 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07005198 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07005199 if (ret == -ERESTARTSYS)
5200 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07005201out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005202 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005203 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005204 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005205 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005206}
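/*
 * The -EAGAIN/-EINPROGRESS branch above is the subtle part of
 * io_connect(): the kernel copy of the sockaddr is stashed in async_data
 * so the retry never re-reads user memory, which the application may
 * have reused by then. On the liburing side this is simply
 * io_uring_prep_connect(sqe, sockfd, addr, addrlen).
 */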
YueHaibing469956e2020-03-04 15:53:52 +08005207#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07005208#define IO_NETOP_FN(op) \
5209static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5210{ \
5211 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07005212}
5213
Jens Axboe99a10082021-02-19 09:35:19 -07005214#define IO_NETOP_PREP(op) \
5215IO_NETOP_FN(op) \
5216static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5217{ \
5218 return -EOPNOTSUPP; \
5219} \
5220
5221#define IO_NETOP_PREP_ASYNC(op) \
5222IO_NETOP_PREP(op) \
5223static int io_##op##_prep_async(struct io_kiocb *req) \
5224{ \
5225 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08005226}
5227
Jens Axboe99a10082021-02-19 09:35:19 -07005228IO_NETOP_PREP_ASYNC(sendmsg);
5229IO_NETOP_PREP_ASYNC(recvmsg);
5230IO_NETOP_PREP_ASYNC(connect);
5231IO_NETOP_PREP(accept);
5232IO_NETOP_FN(send);
5233IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08005234#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06005235
Jens Axboed7718a92020-02-14 22:23:12 -07005236struct io_poll_table {
5237 struct poll_table_struct pt;
5238 struct io_kiocb *req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005239 int nr_entries;
Jens Axboed7718a92020-02-14 22:23:12 -07005240 int error;
5241};
5242
Jens Axboed7718a92020-02-14 22:23:12 -07005243static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01005244 __poll_t mask, io_req_tw_func_t func)
Jens Axboed7718a92020-02-14 22:23:12 -07005245{
Jens Axboed7718a92020-02-14 22:23:12 -07005246 /* for instances that support it check for an event match first: */
5247 if (mask && !(mask & poll->events))
5248 return 0;
5249
5250 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5251
5252 list_del_init(&poll->wait.entry);
5253
Jens Axboed7718a92020-02-14 22:23:12 -07005254 req->result = mask;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01005255 req->io_task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06005256
Jens Axboed7718a92020-02-14 22:23:12 -07005257 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06005258 * If this fails, then the task is exiting. When a task exits, the
5259 * work gets canceled, so just cancel this request as well instead
5260 * of executing it. We can't safely execute it anyway, as we may not
5261	 * have the state needed to run it.
Jens Axboed7718a92020-02-14 22:23:12 -07005262 */
Pavel Begunkove09ee512021-07-01 13:26:05 +01005263 io_req_task_work_add(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005264 return 1;
5265}
5266
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005267static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
5268 __acquires(&req->ctx->completion_lock)
5269{
5270 struct io_ring_ctx *ctx = req->ctx;
5271
Jens Axboe316319e2021-08-19 09:41:42 -06005272 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkove09ee512021-07-01 13:26:05 +01005273 if (unlikely(req->task->flags & PF_EXITING))
5274 WRITE_ONCE(poll->canceled, true);
5275
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005276 if (!req->result && !READ_ONCE(poll->canceled)) {
5277 struct poll_table_struct pt = { ._key = poll->events };
5278
5279 req->result = vfs_poll(req->file, &pt) & poll->events;
5280 }
5281
Jens Axboe79ebeae2021-08-10 15:18:27 -06005282 spin_lock(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005283 if (!req->result && !READ_ONCE(poll->canceled)) {
5284 add_wait_queue(poll->head, &poll->wait);
5285 return true;
5286 }
5287
5288 return false;
5289}
5290
Jens Axboed4e7cd32020-08-15 11:44:50 -07005291static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06005292{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005293 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07005294 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07005295 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07005296 return req->apoll->double_poll;
5297}
5298
5299static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5300{
5301 if (req->opcode == IORING_OP_POLL_ADD)
5302 return &req->poll;
5303 return &req->apoll->poll;
5304}
5305
5306static void io_poll_remove_double(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005307 __must_hold(&req->ctx->completion_lock)
Jens Axboed4e7cd32020-08-15 11:44:50 -07005308{
5309 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005310
5311 lockdep_assert_held(&req->ctx->completion_lock);
5312
5313 if (poll && poll->head) {
5314 struct wait_queue_head *head = poll->head;
5315
Jens Axboe79ebeae2021-08-10 15:18:27 -06005316 spin_lock_irq(&head->lock);
Jens Axboe18bceab2020-05-15 11:56:54 -06005317 list_del_init(&poll->wait.entry);
5318 if (poll->wait.private)
Jens Axboede9b4cc2021-02-24 13:28:27 -07005319 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005320 poll->head = NULL;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005321 spin_unlock_irq(&head->lock);
Jens Axboe18bceab2020-05-15 11:56:54 -06005322 }
5323}
5324
Xiaoguang Wang31efe482021-09-03 22:24:36 +08005325static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005326 __must_hold(&req->ctx->completion_lock)
Jens Axboe18bceab2020-05-15 11:56:54 -06005327{
5328 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe88e41cf2021-02-22 22:08:01 -07005329 unsigned flags = IORING_CQE_F_MORE;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005330 int error;
Jens Axboe18bceab2020-05-15 11:56:54 -06005331
Pavel Begunkove27414b2021-04-09 09:13:20 +01005332 if (READ_ONCE(req->poll.canceled)) {
Jens Axboe45ab03b2021-02-23 08:19:33 -07005333 error = -ECANCELED;
Jens Axboe88e41cf2021-02-22 22:08:01 -07005334 req->poll.events |= EPOLLONESHOT;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005335 } else {
Jens Axboe50826202021-02-23 09:02:26 -07005336 error = mangle_poll(mask);
Pavel Begunkove27414b2021-04-09 09:13:20 +01005337 }
Jens Axboeb69de282021-03-17 08:37:41 -06005338 if (req->poll.events & EPOLLONESHOT)
5339 flags = 0;
Hao Xua62682f2021-09-22 18:12:37 +08005340 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
5341 req->poll.events |= EPOLLONESHOT;
Jens Axboe88e41cf2021-02-22 22:08:01 -07005342 flags = 0;
Hao Xua62682f2021-09-22 18:12:37 +08005343 }
Hao Xu7b289c32021-04-13 15:20:39 +08005344 if (flags & IORING_CQE_F_MORE)
5345 ctx->cq_extra++;
5346
Jens Axboe88e41cf2021-02-22 22:08:01 -07005347 return !(flags & IORING_CQE_F_MORE);
Jens Axboe18bceab2020-05-15 11:56:54 -06005348}
5349
Xiaoguang Wang31efe482021-09-03 22:24:36 +08005350static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
5351 __must_hold(&req->ctx->completion_lock)
5352{
5353 bool done;
5354
5355 done = __io_poll_complete(req, mask);
5356 io_commit_cqring(req->ctx);
5357 return done;
5358}
5359
Pavel Begunkovf237c302021-08-18 12:42:46 +01005360static void io_poll_task_func(struct io_kiocb *req, bool *locked)
Jens Axboe18bceab2020-05-15 11:56:54 -06005361{
Jens Axboe6d816e02020-08-11 08:04:14 -06005362 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005363 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06005364
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005365 if (io_poll_rewait(req, &req->poll)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06005366 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005367 } else {
Pavel Begunkovf40b9642021-04-09 09:13:19 +01005368 bool done;
Jens Axboe88e41cf2021-02-22 22:08:01 -07005369
Xiaoguang Wang31efe482021-09-03 22:24:36 +08005370 done = __io_poll_complete(req, req->result);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005371 if (done) {
Hao Xua890d012021-07-28 11:03:22 +08005372 io_poll_remove_double(req);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005373 hash_del(&req->hash_node);
Hao Xubd99c712021-09-22 18:12:36 +08005374 req->poll.done = true;
Pavel Begunkovf40b9642021-04-09 09:13:19 +01005375 } else {
Jens Axboe88e41cf2021-02-22 22:08:01 -07005376 req->result = 0;
5377 add_wait_queue(req->poll.head, &req->poll.wait);
5378 }
Xiaoguang Wang31efe482021-09-03 22:24:36 +08005379 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005380 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005381 io_cqring_ev_posted(ctx);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005382
Jens Axboe88e41cf2021-02-22 22:08:01 -07005383 if (done) {
5384 nxt = io_put_req_find_next(req);
5385 if (nxt)
Pavel Begunkovf237c302021-08-18 12:42:46 +01005386 io_req_task_submit(nxt, locked);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005387 }
Pavel Begunkovea1164e2020-06-30 15:20:41 +03005388 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005389}
5390
5391static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5392 int sync, void *key)
5393{
5394 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07005395 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005396 __poll_t mask = key_to_poll(key);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005397 unsigned long flags;
Jens Axboe18bceab2020-05-15 11:56:54 -06005398
5399 /* for instances that support it check for an event match first: */
5400 if (mask && !(mask & poll->events))
5401 return 0;
Jens Axboe88e41cf2021-02-22 22:08:01 -07005402 if (!(poll->events & EPOLLONESHOT))
5403 return poll->wait.func(&poll->wait, mode, sync, key);
Jens Axboe18bceab2020-05-15 11:56:54 -06005404
Jens Axboe8706e042020-09-28 08:38:54 -06005405 list_del_init(&wait->entry);
5406
Jens Axboe9ce85ef2021-07-09 08:20:28 -06005407 if (poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005408 bool done;
5409
Jens Axboe79ebeae2021-08-10 15:18:27 -06005410 spin_lock_irqsave(&poll->head->lock, flags);
Jens Axboe807abcb2020-07-17 17:09:27 -06005411 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06005412 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06005413 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005414 /* make sure double remove sees this as being gone */
5415 wait->private = NULL;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005416 spin_unlock_irqrestore(&poll->head->lock, flags);
Jens Axboec8b5e262020-10-25 13:53:26 -06005417 if (!done) {
5418 /* use wait func handler, so it matches the rq type */
5419 poll->wait.func(&poll->wait, mode, sync, key);
5420 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005421 }
Jens Axboede9b4cc2021-02-24 13:28:27 -07005422 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005423 return 1;
5424}
5425
5426static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5427 wait_queue_func_t wake_func)
5428{
5429 poll->head = NULL;
5430 poll->done = false;
5431 poll->canceled = false;
Jens Axboe464dca62021-03-19 14:06:24 -06005432#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5433 /* mask in events that we always want/need */
5434 poll->events = events | IO_POLL_UNMASK;
Jens Axboe18bceab2020-05-15 11:56:54 -06005435 INIT_LIST_HEAD(&poll->wait.entry);
5436 init_waitqueue_func_entry(&poll->wait, wake_func);
5437}
5438
5439static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005440 struct wait_queue_head *head,
5441 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005442{
5443 struct io_kiocb *req = pt->req;
5444
5445 /*
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005446 * The file being polled uses multiple waitqueues for poll handling
5447	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5448 * if this happens.
Jens Axboe18bceab2020-05-15 11:56:54 -06005449 */
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005450 if (unlikely(pt->nr_entries)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005451 struct io_poll_iocb *poll_one = poll;
5452
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005453 /* double add on the same waitqueue head, ignore */
5454 if (poll_one->head == head)
5455 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005456 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005457 if (*poll_ptr) {
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005458 if ((*poll_ptr)->head == head)
5459 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005460 pt->error = -EINVAL;
5461 return;
5462 }
Jens Axboeea6a693d2021-04-15 09:47:13 -06005463 /*
5464 * Can't handle multishot for double wait for now, turn it
5465 * into one-shot mode.
5466 */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005467 if (!(poll_one->events & EPOLLONESHOT))
5468 poll_one->events |= EPOLLONESHOT;
Jens Axboe18bceab2020-05-15 11:56:54 -06005469 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5470 if (!poll) {
5471 pt->error = -ENOMEM;
5472 return;
5473 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005474 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboede9b4cc2021-02-24 13:28:27 -07005475 req_ref_get(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005476 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005477 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005478 }
5479
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005480 pt->nr_entries++;
Jens Axboe18bceab2020-05-15 11:56:54 -06005481 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005482
5483 if (poll->events & EPOLLEXCLUSIVE)
5484 add_wait_queue_exclusive(head, &poll->wait);
5485 else
5486 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005487}
5488
5489static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5490 struct poll_table_struct *p)
5491{
5492 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005493 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005494
Jens Axboe807abcb2020-07-17 17:09:27 -06005495 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005496}
5497
Pavel Begunkovf237c302021-08-18 12:42:46 +01005498static void io_async_task_func(struct io_kiocb *req, bool *locked)
Jens Axboed7718a92020-02-14 22:23:12 -07005499{
Jens Axboed7718a92020-02-14 22:23:12 -07005500 struct async_poll *apoll = req->apoll;
5501 struct io_ring_ctx *ctx = req->ctx;
5502
Olivier Langlois236daeae2021-05-31 02:36:37 -04005503 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
Jens Axboed7718a92020-02-14 22:23:12 -07005504
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005505 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06005506 spin_unlock(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005507 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005508 }
5509
Pavel Begunkov0ea13b42021-04-09 09:13:21 +01005510 hash_del(&req->hash_node);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005511 io_poll_remove_double(req);
Hao Xubd99c712021-09-22 18:12:36 +08005512 apoll->poll.done = true;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005513 spin_unlock(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005514
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005515 if (!READ_ONCE(apoll->poll.canceled))
Pavel Begunkovf237c302021-08-18 12:42:46 +01005516 io_req_task_submit(req, locked);
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005517 else
Pavel Begunkov25935532021-03-19 17:22:40 +00005518 io_req_complete_failed(req, -ECANCELED);
Jens Axboed7718a92020-02-14 22:23:12 -07005519}
5520
5521static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5522 void *key)
5523{
5524 struct io_kiocb *req = wait->private;
5525 struct io_poll_iocb *poll = &req->apoll->poll;
5526
5527 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5528 key_to_poll(key));
5529
5530 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5531}
5532
5533static void io_poll_req_insert(struct io_kiocb *req)
5534{
5535 struct io_ring_ctx *ctx = req->ctx;
5536 struct hlist_head *list;
5537
5538 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5539 hlist_add_head(&req->hash_node, list);
5540}
5541
5542static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5543 struct io_poll_iocb *poll,
5544 struct io_poll_table *ipt, __poll_t mask,
5545 wait_queue_func_t wake_func)
5546 __acquires(&ctx->completion_lock)
5547{
5548 struct io_ring_ctx *ctx = req->ctx;
5549 bool cancel = false;
5550
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005551 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005552 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005553 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005554 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005555
5556 ipt->pt._key = mask;
5557 ipt->req = req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005558 ipt->error = 0;
5559 ipt->nr_entries = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005560
Jens Axboed7718a92020-02-14 22:23:12 -07005561 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005562 if (unlikely(!ipt->nr_entries) && !ipt->error)
5563 ipt->error = -EINVAL;
Jens Axboed7718a92020-02-14 22:23:12 -07005564
Jens Axboe79ebeae2021-08-10 15:18:27 -06005565 spin_lock(&ctx->completion_lock);
Hao Xua890d012021-07-28 11:03:22 +08005566 if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
Pavel Begunkov46fee9a2021-07-20 10:50:44 +01005567 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005568 if (likely(poll->head)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06005569 spin_lock_irq(&poll->head->lock);
Jens Axboed7718a92020-02-14 22:23:12 -07005570 if (unlikely(list_empty(&poll->wait.entry))) {
5571 if (ipt->error)
5572 cancel = true;
5573 ipt->error = 0;
5574 mask = 0;
5575 }
Jens Axboe88e41cf2021-02-22 22:08:01 -07005576 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
Jens Axboed7718a92020-02-14 22:23:12 -07005577 list_del_init(&poll->wait.entry);
5578 else if (cancel)
5579 WRITE_ONCE(poll->canceled, true);
5580 else if (!poll->done) /* actually waiting for an event */
5581 io_poll_req_insert(req);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005582 spin_unlock_irq(&poll->head->lock);
Jens Axboed7718a92020-02-14 22:23:12 -07005583 }
5584
5585 return mask;
5586}
5587
Olivier Langlois59b735a2021-06-22 05:17:39 -07005588enum {
5589 IO_APOLL_OK,
5590 IO_APOLL_ABORTED,
5591 IO_APOLL_READY
5592};
5593
5594static int io_arm_poll_handler(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005595{
5596 const struct io_op_def *def = &io_op_defs[req->opcode];
5597 struct io_ring_ctx *ctx = req->ctx;
5598 struct async_poll *apoll;
5599 struct io_poll_table ipt;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005600 __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005601 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005602
5603 if (!req->file || !file_can_poll(req->file))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005604 return IO_APOLL_ABORTED;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005605 if (req->flags & REQ_F_POLLED)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005606 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005607 if (!def->pollin && !def->pollout)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005608 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005609
5610 if (def->pollin) {
5611 rw = READ;
5612 mask |= POLLIN | POLLRDNORM;
5613
5614 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5615 if ((req->opcode == IORING_OP_RECVMSG) &&
5616 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5617 mask &= ~POLLIN;
5618 } else {
5619 rw = WRITE;
5620 mask |= POLLOUT | POLLWRNORM;
5621 }
5622
Jens Axboe9dab14b2020-08-25 12:27:50 -06005623	/* if we can't try nonblocking, there's no point in arming a poll handler */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01005624 if (!io_file_supports_nowait(req, rw))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005625 return IO_APOLL_ABORTED;
Jens Axboed7718a92020-02-14 22:23:12 -07005626
5627 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5628 if (unlikely(!apoll))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005629 return IO_APOLL_ABORTED;
Jens Axboe807abcb2020-07-17 17:09:27 -06005630 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005631 req->apoll = apoll;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005632 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005633 ipt.pt._qproc = io_async_queue_proc;
Pavel Begunkov48dcd382021-08-15 10:40:18 +01005634 io_req_set_refcount(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005635
5636 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5637 io_async_wake);
Jens Axboe79ebeae2021-08-10 15:18:27 -06005638 spin_unlock(&ctx->completion_lock);
Hao Xu41a51692021-08-12 15:47:02 +08005639 if (ret || ipt.error)
5640 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5641
Olivier Langlois236daeae2021-05-31 02:36:37 -04005642 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5643 mask, apoll->poll.events);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005644 return IO_APOLL_OK;
Jens Axboed7718a92020-02-14 22:23:12 -07005645}
5646
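/*
 * Caller contract, mirroring the IO_APOLL_* enum above: OK means the
 * request is parked on the file's waitqueue and will be re-issued from
 * task work once it signals readiness; READY means the file was already
 * ready, so issue it again immediately; ABORTED means arming wasn't
 * possible (no pollable file, already polled once, opcode neither pollin
 * nor pollout, no nowait support, or allocation failure) and the request
 * falls back to blocking io-wq execution.
 */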
5647static bool __io_poll_remove_one(struct io_kiocb *req,
Jens Axboeb2e720a2021-03-31 09:03:03 -06005648 struct io_poll_iocb *poll, bool do_cancel)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005649 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005650{
Jens Axboeb41e9852020-02-17 09:52:41 -07005651 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005652
Jens Axboe50826202021-02-23 09:02:26 -07005653 if (!poll->head)
5654 return false;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005655 spin_lock_irq(&poll->head->lock);
Jens Axboeb2e720a2021-03-31 09:03:03 -06005656 if (do_cancel)
5657 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005658 if (!list_empty(&poll->wait.entry)) {
5659 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005660 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005661 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005662 spin_unlock_irq(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005663 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005664 return do_complete;
5665}
5666
Pavel Begunkov5d709042021-08-09 20:18:13 +01005667static bool io_poll_remove_one(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005668 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005669{
5670 bool do_complete;
5671
Jens Axboed4e7cd32020-08-15 11:44:50 -07005672 io_poll_remove_double(req);
Pavel Begunkove31001a2021-04-13 02:58:43 +01005673 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005674
Jens Axboeb41e9852020-02-17 09:52:41 -07005675 if (do_complete) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005676 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
Jens Axboeb41e9852020-02-17 09:52:41 -07005677 io_commit_cqring(req->ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005678 req_set_fail(req);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01005679 io_put_req_deferred(req);
Pavel Begunkov5d709042021-08-09 20:18:13 +01005680 }
Jens Axboeb41e9852020-02-17 09:52:41 -07005681 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005682}
5683
Jens Axboe76e1b642020-09-26 15:05:03 -06005684/*
5685 * Returns true if we found and killed one or more poll requests
5686 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005687static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005688 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005689{
Jens Axboe78076bb2019-12-04 19:56:40 -07005690 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005691 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005692 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005693
Jens Axboe79ebeae2021-08-10 15:18:27 -06005694 spin_lock(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005695 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5696 struct hlist_head *list;
5697
5698 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005699 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005700 if (io_match_task(req, tsk, cancel_all))
Jens Axboef3606e32020-09-22 08:18:24 -06005701 posted += io_poll_remove_one(req);
5702 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005703 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005704 spin_unlock(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005705
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005706 if (posted)
5707 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005708
5709 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005710}
5711
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005712static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5713 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005714 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005715{
Jens Axboe78076bb2019-12-04 19:56:40 -07005716 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005717 struct io_kiocb *req;
5718
Jens Axboe78076bb2019-12-04 19:56:40 -07005719 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5720 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005721 if (sqe_addr != req->user_data)
5722 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005723 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5724 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005725 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005726 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005727 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07005728}
5729
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005730static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5731 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005732 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005733{
5734 struct io_kiocb *req;
5735
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005736 req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005737 if (!req)
5738 return -ENOENT;
5739 if (io_poll_remove_one(req))
5740 return 0;
5741
5742 return -EALREADY;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005743}
5744
Pavel Begunkov9096af32021-04-14 13:38:36 +01005745static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5746 unsigned int flags)
5747{
5748 u32 events;
5749
5750 events = READ_ONCE(sqe->poll32_events);
5751#ifdef __BIG_ENDIAN
5752 events = swahw32(events);
5753#endif
5754 if (!(flags & IORING_POLL_ADD_MULTI))
5755 events |= EPOLLONESHOT;
5756 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5757}
5758
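/*
 * Prepare IORING_OP_POLL_REMOVE, which also doubles as poll update.
 * As read below: sqe->addr holds the user_data of the target request,
 * sqe->len the IORING_POLL_UPDATE_* and IORING_POLL_ADD_MULTI flags,
 * sqe->off the replacement user_data, and sqe->poll32_events the
 * replacement event mask. A rough userspace sketch, raw sqe fields
 * only, no liburing helper assumed:
 *
 *	sqe->opcode = IORING_OP_POLL_REMOVE;
 *	sqe->addr = old_tag;
 *	sqe->len = IORING_POLL_UPDATE_EVENTS;
 *	sqe->poll32_events = POLLIN | POLLOUT;
 */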
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005759static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005760 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005761{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005762 struct io_poll_update *upd = &req->poll_update;
5763 u32 flags;
5764
Jens Axboe221c5eb2019-01-17 09:41:58 -07005765 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5766 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005767 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005768 return -EINVAL;
5769 flags = READ_ONCE(sqe->len);
5770 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5771 IORING_POLL_ADD_MULTI))
5772 return -EINVAL;
5773	/* IORING_POLL_ADD_MULTI alone is meaningless without an update flag */
5774 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005775 return -EINVAL;
5776
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005777 upd->old_user_data = READ_ONCE(sqe->addr);
5778 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5779 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005780
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005781 upd->new_user_data = READ_ONCE(sqe->off);
5782 if (!upd->update_user_data && upd->new_user_data)
5783 return -EINVAL;
5784 if (upd->update_events)
5785 upd->events = io_poll_parse_events(sqe, flags);
5786 else if (sqe->poll32_events)
5787 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005788
Jens Axboe221c5eb2019-01-17 09:41:58 -07005789 return 0;
5790}
5791
Jens Axboe221c5eb2019-01-17 09:41:58 -07005792static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5793 void *key)
5794{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005795 struct io_kiocb *req = wait->private;
5796 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005797
Jens Axboed7718a92020-02-14 22:23:12 -07005798 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005799}
5800
Jens Axboe221c5eb2019-01-17 09:41:58 -07005801static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5802 struct poll_table_struct *p)
5803{
5804 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5805
Jens Axboee8c2bc12020-08-15 18:44:09 -07005806 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005807}
5808
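/*
 * Prepare IORING_OP_POLL_ADD. Flags are carried in sqe->len, and only
 * IORING_POLL_ADD_MULTI is accepted; without it the poll is oneshot
 * (EPOLLONESHOT is implied by io_poll_parse_events() above). A minimal
 * userspace sketch, assuming liburing's prep helper (not part of this
 * file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = tag;
 *	io_uring_submit(&ring);
 */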
Jens Axboe3529d8c2019-12-19 18:24:38 -07005809static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005810{
5811 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005812 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005813
5814 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5815 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005816 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005817 return -EINVAL;
5818 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005819 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005820 return -EINVAL;
5821
Pavel Begunkov48dcd382021-08-15 10:40:18 +01005822 io_req_set_refcount(req);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005823 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005824 return 0;
5825}
5826
Pavel Begunkov61e98202021-02-10 00:03:08 +00005827static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005828{
5829 struct io_poll_iocb *poll = &req->poll;
5830 struct io_ring_ctx *ctx = req->ctx;
5831 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005832 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005833
Jens Axboed7718a92020-02-14 22:23:12 -07005834 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005835
Jens Axboed7718a92020-02-14 22:23:12 -07005836 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5837 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005838
Jens Axboe8c838782019-03-12 15:48:16 -06005839 if (mask) { /* no async arming needed, we completed the poll inline */
Jens Axboe8c838782019-03-12 15:48:16 -06005840 ipt.error = 0;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005841 io_poll_complete(req, mask);
Jens Axboe8c838782019-03-12 15:48:16 -06005842 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005843 spin_unlock(&ctx->completion_lock);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005844
Jens Axboe8c838782019-03-12 15:48:16 -06005845 if (mask) {
5846 io_cqring_ev_posted(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005847 if (poll->events & EPOLLONESHOT)
5848 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005849 }
Jens Axboe8c838782019-03-12 15:48:16 -06005850 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005851}
5852
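/*
 * Execute IORING_OP_POLL_REMOVE: find the victim by its old user_data
 * and either cancel it outright (no update flags set), or detach it,
 * patch the event mask and/or user_data, and re-arm it via
 * io_poll_add(). Racing against a oneshot completion returns
 * -EALREADY, as a completed singleshot request cannot be updated
 * safely.
 */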
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005853static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06005854{
5855 struct io_ring_ctx *ctx = req->ctx;
5856 struct io_kiocb *preq;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005857 bool completing;
Jens Axboeb69de282021-03-17 08:37:41 -06005858 int ret;
5859
Jens Axboe79ebeae2021-08-10 15:18:27 -06005860 spin_lock(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005861 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Jens Axboeb69de282021-03-17 08:37:41 -06005862 if (!preq) {
5863 ret = -ENOENT;
5864 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005865 }
Jens Axboecb3b200e2021-04-06 09:49:31 -06005866
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005867 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5868 completing = true;
5869 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5870 goto err;
5871 }
5872
Jens Axboecb3b200e2021-04-06 09:49:31 -06005873 /*
5874 * Don't allow racy completion with singleshot, as we cannot safely
5875 * update those. For multishot, if we're racing with completion, just
5876 * let completion re-add it.
5877 */
5878 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5879 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5880 ret = -EALREADY;
5881 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005882 }
5883 /* we now have a detached poll request; reissue it */
5884 ret = 0;
5885err:
Jens Axboeb69de282021-03-17 08:37:41 -06005886 if (ret < 0) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06005887 spin_unlock(&ctx->completion_lock);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005888 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06005889 io_req_complete(req, ret);
5890 return 0;
5891 }
5892 /* only mask one event flags, keep behavior flags */
Pavel Begunkov9d805892021-04-13 02:58:40 +01005893 if (req->poll_update.update_events) {
Jens Axboeb69de282021-03-17 08:37:41 -06005894 preq->poll.events &= ~0xffff;
Pavel Begunkov9d805892021-04-13 02:58:40 +01005895 preq->poll.events |= req->poll_update.events & 0xffff;
Jens Axboeb69de282021-03-17 08:37:41 -06005896 preq->poll.events |= IO_POLL_UNMASK;
5897 }
Pavel Begunkov9d805892021-04-13 02:58:40 +01005898 if (req->poll_update.update_user_data)
5899 preq->user_data = req->poll_update.new_user_data;
Jens Axboe79ebeae2021-08-10 15:18:27 -06005900 spin_unlock(&ctx->completion_lock);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005901
Jens Axboeb69de282021-03-17 08:37:41 -06005902 /* complete update request, we're done with it */
5903 io_req_complete(req, ret);
5904
Jens Axboecb3b200e2021-04-06 09:49:31 -06005905 if (!completing) {
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005906 ret = io_poll_add(preq, issue_flags);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005907 if (ret < 0) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005908 req_set_fail(preq);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005909 io_req_complete(preq, ret);
5910 }
Jens Axboeb69de282021-03-17 08:37:41 -06005911 }
5912 return 0;
5913}
5914
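/*
 * Timeout completion is split in two: io_timeout_fn() runs in hrtimer
 * (irq) context, unlinks the request under ->timeout_lock and queues
 * the task_work below, which then posts the -ETIME completion from a
 * context where ->completion_lock may be taken safely.
 */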
Pavel Begunkovf237c302021-08-18 12:42:46 +01005915static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89850fc2021-08-10 15:11:51 -06005916{
Jens Axboe89850fc2021-08-10 15:11:51 -06005917 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01005918 io_req_complete_post(req, -ETIME, 0);
Jens Axboe89850fc2021-08-10 15:11:51 -06005919}
5920
Jens Axboe5262f562019-09-17 12:26:57 -06005921static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5922{
Jens Axboead8a48a2019-11-15 08:49:11 -07005923 struct io_timeout_data *data = container_of(timer,
5924 struct io_timeout_data, timer);
5925 struct io_kiocb *req = data->req;
5926 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005927 unsigned long flags;
5928
Jens Axboe89850fc2021-08-10 15:11:51 -06005929 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005930 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005931 atomic_set(&req->ctx->cq_timeouts,
5932 atomic_read(&req->ctx->cq_timeouts) + 1);
Jens Axboe89850fc2021-08-10 15:11:51 -06005933 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005934
Jens Axboe89850fc2021-08-10 15:11:51 -06005935 req->io_task_work.func = io_req_task_timeout;
5936 io_req_task_work_add(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005937 return HRTIMER_NORESTART;
5938}
5939
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005940static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5941 __u64 user_data)
Jens Axboe89850fc2021-08-10 15:11:51 -06005942 __must_hold(&ctx->timeout_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005943{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005944 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005945 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005946 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06005947
5948 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005949 found = user_data == req->user_data;
5950 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06005951 break;
Jens Axboef254ac02020-08-12 17:33:30 -06005952 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005953 if (!found)
5954 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06005955
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005956 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005957 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005958 return ERR_PTR(-EALREADY);
5959 list_del_init(&req->timeout.list);
5960 return req;
5961}
5962
5963static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01005964 __must_hold(&ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06005965 __must_hold(&ctx->timeout_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005966{
5967 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5968
5969 if (IS_ERR(req))
5970 return PTR_ERR(req);
5971
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005972 req_set_fail(req);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005973 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01005974 io_put_req_deferred(req);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005975 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005976}
5977
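/*
 * Pick the clock a timeout is armed against: CLOCK_MONOTONIC by
 * default, or the clockid selected by IORING_TIMEOUT_BOOTTIME or
 * IORING_TIMEOUT_REALTIME. At most one clock flag can be set, which
 * prep has already enforced with hweight32() on the mask.
 */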
Jens Axboe50c1df22021-08-27 17:11:06 -06005978static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
5979{
5980 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
5981 case IORING_TIMEOUT_BOOTTIME:
5982 return CLOCK_BOOTTIME;
5983 case IORING_TIMEOUT_REALTIME:
5984 return CLOCK_REALTIME;
5985 default:
5986 /* can't happen, vetted at prep time */
5987 WARN_ON_ONCE(1);
5988 fallthrough;
5989 case 0:
5990 return CLOCK_MONOTONIC;
5991 }
5992}
5993
Pavel Begunkovf1042b62021-08-28 19:54:39 -06005994static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5995 struct timespec64 *ts, enum hrtimer_mode mode)
5996 __must_hold(&ctx->timeout_lock)
5997{
5998 struct io_timeout_data *io;
5999 struct io_kiocb *req;
6000 bool found = false;
6001
6002 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
6003 found = user_data == req->user_data;
6004 if (found)
6005 break;
6006 }
6007 if (!found)
6008 return -ENOENT;
6009
6010 io = req->async_data;
6011 if (hrtimer_try_to_cancel(&io->timer) == -1)
6012 return -EALREADY;
6013 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
6014 io->timer.function = io_link_timeout_fn;
6015 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
6016 return 0;
6017}
6018
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006019static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6020 struct timespec64 *ts, enum hrtimer_mode mode)
Jens Axboe89850fc2021-08-10 15:11:51 -06006021 __must_hold(&ctx->timeout_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006022{
6023 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6024 struct io_timeout_data *data;
6025
6026 if (IS_ERR(req))
6027 return PTR_ERR(req);
6028
6029 req->timeout.off = 0; /* noseq */
6030 data = req->async_data;
6031 list_add_tail(&req->timeout.list, &ctx->timeout_list);
Jens Axboe50c1df22021-08-27 17:11:06 -06006032 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006033 data->timer.function = io_timeout_fn;
6034 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6035 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07006036}
6037
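/*
 * Prepare IORING_OP_TIMEOUT_REMOVE. Plain removal only needs the
 * target's user_data in sqe->addr; with IORING_TIMEOUT_UPDATE (or
 * IORING_LINK_TIMEOUT_UPDATE) a replacement timespec is copied in from
 * sqe->addr2 instead. A rough sketch, assuming liburing's helper (not
 * part of this file):
 *
 *	io_uring_prep_timeout_update(sqe, &ts, old_tag, 0);
 */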
Jens Axboe3529d8c2019-12-19 18:24:38 -07006038static int io_timeout_remove_prep(struct io_kiocb *req,
6039 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07006040{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006041 struct io_timeout_rem *tr = &req->timeout_rem;
6042
Jens Axboeb29472e2019-12-17 18:50:29 -07006043 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6044 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006045 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6046 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006047 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
Jens Axboeb29472e2019-12-17 18:50:29 -07006048 return -EINVAL;
6049
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006050 tr->ltimeout = false;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006051 tr->addr = READ_ONCE(sqe->addr);
6052 tr->flags = READ_ONCE(sqe->timeout_flags);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006053 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6054 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6055 return -EINVAL;
6056 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6057 tr->ltimeout = true;
6058 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006059 return -EINVAL;
6060 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6061 return -EFAULT;
6062 } else if (tr->flags) {
6063 /* timeout removal doesn't support flags */
6064 return -EINVAL;
6065 }
6066
Jens Axboeb29472e2019-12-17 18:50:29 -07006067 return 0;
6068}
6069
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006070static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6071{
6072 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6073 : HRTIMER_MODE_REL;
6074}
6075
Jens Axboe11365042019-10-16 09:08:32 -06006076/*
6077 * Remove or update an existing timeout command
6078 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00006079static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06006080{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006081 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06006082 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006083 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06006084
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006085 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6086 spin_lock(&ctx->completion_lock);
6087 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006088 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006089 spin_unlock_irq(&ctx->timeout_lock);
6090 spin_unlock(&ctx->completion_lock);
6091 } else {
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006092 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6093
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006094 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006095 if (tr->ltimeout)
6096 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6097 else
6098 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006099 spin_unlock_irq(&ctx->timeout_lock);
6100 }
Jens Axboe11365042019-10-16 09:08:32 -06006101
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006102 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006103 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006104 io_req_complete_post(req, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06006105 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06006106}
6107
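/*
 * Prepare IORING_OP_TIMEOUT and IORING_OP_LINK_TIMEOUT. sqe->addr
 * points at a struct __kernel_timespec, and sqe->off may carry a
 * completion count after which the timeout fires. A minimal sketch of
 * a pure 1s timeout, assuming liburing's helper (not part of this
 * file):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 */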
Jens Axboe3529d8c2019-12-19 18:24:38 -07006108static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07006109 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06006110{
Jens Axboead8a48a2019-11-15 08:49:11 -07006111 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06006112 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006113 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06006114
Jens Axboead8a48a2019-11-15 08:49:11 -07006115 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06006116 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006117 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6118 sqe->splice_fd_in)
Jens Axboea41525a2019-10-15 16:48:15 -06006119 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006120 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07006121 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06006122 flags = READ_ONCE(sqe->timeout_flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006123 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
6124 return -EINVAL;
6125 /* more than one clock specified is invalid, obviously */
6126 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
Jens Axboe5262f562019-09-17 12:26:57 -06006127 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06006128
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006129 INIT_LIST_HEAD(&req->timeout.list);
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006130 req->timeout.off = off;
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01006131 if (unlikely(off && !req->ctx->off_timeout_used))
6132 req->ctx->off_timeout_used = true;
Jens Axboe26a61672019-12-20 09:02:01 -07006133
Jens Axboee8c2bc12020-08-15 18:44:09 -07006134 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07006135 return -ENOMEM;
6136
Jens Axboee8c2bc12020-08-15 18:44:09 -07006137 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006138 data->req = req;
Jens Axboe50c1df22021-08-27 17:11:06 -06006139 data->flags = flags;
Jens Axboead8a48a2019-11-15 08:49:11 -07006140
6141 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06006142 return -EFAULT;
6143
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006144 data->mode = io_translate_timeout_mode(flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006145 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006146
6147 if (is_timeout_link) {
6148 struct io_submit_link *link = &req->ctx->submit_state.link;
6149
6150 if (!link->head)
6151 return -EINVAL;
6152 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6153 return -EINVAL;
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01006154 req->timeout.head = link->last;
6155 link->last->flags |= REQ_F_ARM_LTIMEOUT;
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006156 }
Jens Axboead8a48a2019-11-15 08:49:11 -07006157 return 0;
6158}
6159
Pavel Begunkov61e98202021-02-10 00:03:08 +00006160static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07006161{
Jens Axboead8a48a2019-11-15 08:49:11 -07006162 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006163 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006164 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006165 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07006166
Jens Axboe89850fc2021-08-10 15:11:51 -06006167 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07006168
Jens Axboe5262f562019-09-17 12:26:57 -06006169 /*
6170	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07006171	 * timeout event to be satisfied. If it isn't set, then this is
6172	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06006173 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006174 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07006175 entry = ctx->timeout_list.prev;
6176 goto add;
6177 }
Jens Axboe5262f562019-09-17 12:26:57 -06006178
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006179 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6180 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06006181
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05006182 /* Update the last seq here in case io_flush_timeouts() hasn't.
6183	 * This is safe because ->timeout_lock is held: both this path and
6184	 * io_flush_timeouts() update ->cq_last_tm_flush under that lock.
6185 */
6186 ctx->cq_last_tm_flush = tail;
6187
Jens Axboe5262f562019-09-17 12:26:57 -06006188 /*
6189 * Insertion sort, ensuring the first entry in the list is always
6190 * the one we need first.
6191 */
Jens Axboe5262f562019-09-17 12:26:57 -06006192 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006193 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6194 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06006195
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006196 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07006197 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006198 /* nxt.seq is behind @tail, otherwise would've been completed */
6199 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06006200 break;
6201 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07006202add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006203 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07006204 data->timer.function = io_timeout_fn;
6205 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe89850fc2021-08-10 15:11:51 -06006206 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06006207 return 0;
6208}
6209
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006210struct io_cancel_data {
6211 struct io_ring_ctx *ctx;
6212 u64 user_data;
6213};
6214
Jens Axboe62755e32019-10-28 21:49:21 -06006215static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06006216{
Jens Axboe62755e32019-10-28 21:49:21 -06006217 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006218 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06006219
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006220 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06006221}
6222
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006223static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
6224 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06006225{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006226 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06006227 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06006228 int ret = 0;
6229
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006230 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07006231 return -ENOENT;
6232
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006233 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06006234 switch (cancel_ret) {
6235 case IO_WQ_CANCEL_OK:
6236 ret = 0;
6237 break;
6238 case IO_WQ_CANCEL_RUNNING:
6239 ret = -EALREADY;
6240 break;
6241 case IO_WQ_CANCEL_NOTFOUND:
6242 ret = -ENOENT;
6243 break;
6244 }
6245
Jens Axboee977d6d2019-11-05 12:39:45 -07006246 return ret;
6247}
6248
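/*
 * Best-effort cancellation by user_data, escalating through the places
 * a request may be parked: the task's io-wq first, then armed timeouts
 * (under ->timeout_lock), then the poll hash. Returns 0 on success,
 * -EALREADY if the target is already executing, -ENOENT if nothing
 * matched.
 */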
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006249static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
Jens Axboe47f46762019-11-09 17:43:02 -07006250{
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006251 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006252 int ret;
6253
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006254 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006255
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006256 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01006257 if (ret != -ENOENT)
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006258 return ret;
Pavel Begunkov505657b2021-08-17 20:28:09 +01006259
6260 spin_lock(&ctx->completion_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006261 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006262 ret = io_timeout_cancel(ctx, sqe_addr);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006263 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006264 if (ret != -ENOENT)
Pavel Begunkov505657b2021-08-17 20:28:09 +01006265 goto out;
6266 ret = io_poll_cancel(ctx, sqe_addr, false);
6267out:
6268 spin_unlock(&ctx->completion_lock);
6269 return ret;
Jens Axboe47f46762019-11-09 17:43:02 -07006270}
6271
Jens Axboe3529d8c2019-12-19 18:24:38 -07006272static int io_async_cancel_prep(struct io_kiocb *req,
6273 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07006274{
Jens Axboefbf23842019-12-17 18:45:56 -07006275 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07006276 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006277 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6278 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006279 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6280 sqe->splice_fd_in)
Jens Axboee977d6d2019-11-05 12:39:45 -07006281 return -EINVAL;
6282
Jens Axboefbf23842019-12-17 18:45:56 -07006283 req->cancel.addr = READ_ONCE(sqe->addr);
6284 return 0;
6285}
6286
Pavel Begunkov61e98202021-02-10 00:03:08 +00006287static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07006288{
6289 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006290 u64 sqe_addr = req->cancel.addr;
6291 struct io_tctx_node *node;
6292 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07006293
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006294 ret = io_try_cancel_userdata(req, sqe_addr);
Pavel Begunkov58f99372021-03-12 16:25:55 +00006295 if (ret != -ENOENT)
6296 goto done;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006297
6298	/* slow path, try the io-wq of every task attached to the ring */
6299 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
6300 ret = -ENOENT;
6301 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6302 struct io_uring_task *tctx = node->task->io_uring;
6303
Pavel Begunkov58f99372021-03-12 16:25:55 +00006304 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6305 if (ret != -ENOENT)
6306 break;
6307 }
6308 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkov58f99372021-03-12 16:25:55 +00006309done:
Pavel Begunkov58f99372021-03-12 16:25:55 +00006310 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006311 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006312 io_req_complete_post(req, ret, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06006313 return 0;
6314}
6315
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006316static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07006317 const struct io_uring_sqe *sqe)
6318{
Daniele Albano61710e42020-07-18 14:15:16 -06006319 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6320 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006321 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006322 return -EINVAL;
6323
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006324 req->rsrc_update.offset = READ_ONCE(sqe->off);
6325 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6326 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006327 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006328 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006329 return 0;
6330}
6331
Pavel Begunkov889fca72021-02-10 00:03:09 +00006332static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006333{
6334 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006335 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006336 int ret;
6337
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006338 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006339 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006340
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006341 up.offset = req->rsrc_update.offset;
6342 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006343 up.nr = 0;
6344 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01006345 up.resv = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006346
6347 mutex_lock(&ctx->uring_lock);
Pavel Begunkovfdecb662021-04-25 14:32:20 +01006348 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01006349 &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006350 mutex_unlock(&ctx->uring_lock);
6351
6352 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006353 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00006354 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006355 return 0;
6356}
6357
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006358static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07006359{
Jens Axboed625c6e2019-12-17 19:53:05 -07006360 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07006361 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006362 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07006363 case IORING_OP_READV:
6364 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006365 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006366 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006367 case IORING_OP_WRITEV:
6368 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006369 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006370 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006371 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006372 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006373 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006374 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006375 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006376 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006377 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006378 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006379 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006380 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006381 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006382 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006383 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006384 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07006385 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006386 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006387 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006388 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07006389 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006390 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07006391 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006392 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006393 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006394 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006395 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006396 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07006397 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006398 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006399 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006400 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07006401 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006402 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006403 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006404 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006405 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006406 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07006407 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006408 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07006409 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006410 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07006411 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006412 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006413 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006414 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006415 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006416 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006417 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006418 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07006419 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006420 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006421 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006422 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006423 case IORING_OP_SHUTDOWN:
6424 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06006425 case IORING_OP_RENAMEAT:
6426 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06006427 case IORING_OP_UNLINKAT:
6428 return io_unlinkat_prep(req, sqe);
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006429 case IORING_OP_MKDIRAT:
6430 return io_mkdirat_prep(req, sqe);
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006431 case IORING_OP_SYMLINKAT:
6432 return io_symlinkat_prep(req, sqe);
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006433 case IORING_OP_LINKAT:
6434 return io_linkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006435 }
6436
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006437 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6438 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01006439 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006440}
6441
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006442static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006443{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006444 if (!io_op_defs[req->opcode].needs_async_setup)
6445 return 0;
6446 if (WARN_ON_ONCE(req->async_data))
6447 return -EFAULT;
6448 if (io_alloc_async_data(req))
6449 return -EAGAIN;
6450
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006451 switch (req->opcode) {
6452 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006453 return io_rw_prep_async(req, READ);
6454 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006455 return io_rw_prep_async(req, WRITE);
6456 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006457 return io_sendmsg_prep_async(req);
6458 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006459 return io_recvmsg_prep_async(req);
6460 case IORING_OP_CONNECT:
6461 return io_connect_prep_async(req);
6462 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006463 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6464 req->opcode);
6465 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07006466}
6467
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006468static u32 io_get_sequence(struct io_kiocb *req)
6469{
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006470 u32 seq = req->ctx->cached_sq_head;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006471
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006472 /* need original cached_sq_head, but it was increased for each req */
6473 io_for_each_link(req, req)
6474 seq--;
6475 return seq;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006476}
6477
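/*
 * Handle IOSQE_IO_DRAIN: decide whether @req must wait for all prior
 * requests to complete. If so, prep it for async execution and park it
 * on ->defer_list together with its submission sequence. Returns true
 * if the request was consumed (deferred or failed), false if the
 * caller may go on and issue it directly.
 */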
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006478static bool io_drain_req(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006479{
Pavel Begunkov3c199662021-06-15 16:47:57 +01006480 struct io_kiocb *pos;
Jens Axboedef596e2019-01-09 08:59:42 -07006481 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006482 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07006483 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006484 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07006485
Pavel Begunkovb8ce1b92021-08-31 14:13:11 +01006486 if (req->flags & REQ_F_FAIL) {
6487 io_req_complete_fail_submit(req);
6488 return true;
6489 }
6490
Pavel Begunkov3c199662021-06-15 16:47:57 +01006491 /*
6492 * If we need to drain a request in the middle of a link, drain the
6493 * head request and the next request/link after the current link.
6494	 * Since links execute sequentially, the IOSQE_IO_DRAIN semantics
6495	 * are maintained for every request of the link.
6496 */
6497 if (ctx->drain_next) {
6498 req->flags |= REQ_F_IO_DRAIN;
6499 ctx->drain_next = false;
6500 }
6501 /* not interested in head, start from the first linked */
6502 io_for_each_link(pos, req->link) {
6503 if (pos->flags & REQ_F_IO_DRAIN) {
6504 ctx->drain_next = true;
6505 req->flags |= REQ_F_IO_DRAIN;
6506 break;
6507 }
6508 }
6509
Jens Axboedef596e2019-01-09 08:59:42 -07006510 /* Still need to defer if there are pending reqs in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006511 if (likely(list_empty_careful(&ctx->defer_list) &&
Pavel Begunkov10c66902021-06-15 16:47:56 +01006512 !(req->flags & REQ_F_IO_DRAIN))) {
6513 ctx->drain_active = false;
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006514 return false;
Pavel Begunkov10c66902021-06-15 16:47:56 +01006515 }
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006516
6517 seq = io_get_sequence(req);
6518 /* Still a chance to pass the sequence check */
6519 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006520 return false;
Jens Axboedef596e2019-01-09 08:59:42 -07006521
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006522 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006523 if (ret)
Pavel Begunkov1b487732021-07-11 22:41:13 +01006524 goto fail;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006525 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006526 de = kmalloc(sizeof(*de), GFP_KERNEL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006527 if (!de) {
Pavel Begunkov1b487732021-07-11 22:41:13 +01006528 ret = -ENOMEM;
6529fail:
6530 io_req_complete_failed(req, ret);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006531 return true;
6532 }
Jens Axboe31b51512019-01-18 22:56:34 -07006533
Jens Axboe79ebeae2021-08-10 15:18:27 -06006534 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006535 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06006536 spin_unlock(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006537 kfree(de);
Pavel Begunkovf237c302021-08-18 12:42:46 +01006538 io_queue_async_work(req, NULL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006539 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006540 }
6541
6542 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006543 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006544 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006545 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006546 spin_unlock(&ctx->completion_lock);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006547 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006548}
6549
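/*
 * Release per-opcode resources once the request is done with them:
 * selected buffers, async iovecs and msghdrs, spliced files,
 * filenames, any apoll entry and its double_poll, plus an overridden
 * ->creds reference. Clears the IO_REQ_CLEAN_FLAGS bits so cleanup
 * never runs twice for the same request.
 */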
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006550static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006551{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006552 if (req->flags & REQ_F_BUFFER_SELECTED) {
6553 switch (req->opcode) {
6554 case IORING_OP_READV:
6555 case IORING_OP_READ_FIXED:
6556 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006557 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006558 break;
6559 case IORING_OP_RECVMSG:
6560 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006561 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006562 break;
6563 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006564 }
6565
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006566 if (req->flags & REQ_F_NEED_CLEANUP) {
6567 switch (req->opcode) {
6568 case IORING_OP_READV:
6569 case IORING_OP_READ_FIXED:
6570 case IORING_OP_READ:
6571 case IORING_OP_WRITEV:
6572 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006573 case IORING_OP_WRITE: {
6574 struct io_async_rw *io = req->async_data;
Pavel Begunkov1dacb4d2021-06-17 18:14:03 +01006575
6576 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006577 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006578 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006579 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006580 case IORING_OP_SENDMSG: {
6581 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006582
6583 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006584 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006585 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006586 case IORING_OP_SPLICE:
6587 case IORING_OP_TEE:
Pavel Begunkove1d767f2021-03-19 17:22:43 +00006588 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6589 io_put_file(req->splice.file_in);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006590 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006591 case IORING_OP_OPENAT:
6592 case IORING_OP_OPENAT2:
6593 if (req->open.filename)
6594 putname(req->open.filename);
6595 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006596 case IORING_OP_RENAMEAT:
6597 putname(req->rename.oldpath);
6598 putname(req->rename.newpath);
6599 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006600 case IORING_OP_UNLINKAT:
6601 putname(req->unlink.filename);
6602 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006603 case IORING_OP_MKDIRAT:
6604 putname(req->mkdir.filename);
6605 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006606 case IORING_OP_SYMLINKAT:
6607 putname(req->symlink.oldpath);
6608 putname(req->symlink.newpath);
6609 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006610 case IORING_OP_LINKAT:
6611 putname(req->hardlink.oldpath);
6612 putname(req->hardlink.newpath);
6613 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006614 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006615 }
Jens Axboe75652a302021-04-15 09:52:40 -06006616 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6617 kfree(req->apoll->double_poll);
6618 kfree(req->apoll);
6619 req->apoll = NULL;
6620 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006621 if (req->flags & REQ_F_INFLIGHT) {
6622 struct io_uring_task *tctx = req->task->io_uring;
6623
6624 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006625 }
Pavel Begunkovc8543572021-06-17 18:14:04 +01006626 if (req->flags & REQ_F_CREDS)
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006627 put_cred(req->creds);
Pavel Begunkovc8543572021-06-17 18:14:04 +01006628
6629 req->flags &= ~IO_REQ_CLEAN_FLAGS;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006630}
6631
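/*
 * Central opcode dispatch for a fully prepped request. Credentials are
 * temporarily overridden while issuing when REQ_F_CREDS is set, and a
 * successfully issued request that has a file on an
 * IORING_SETUP_IOPOLL ring is handed to io_iopoll_req_issued() for
 * completion polling.
 */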
Pavel Begunkov889fca72021-02-10 00:03:09 +00006632static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006633{
Jens Axboeedafcce2019-01-09 09:16:05 -07006634 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006635 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006636 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006637
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006638 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006639 creds = override_creds(req->creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006640
Jens Axboed625c6e2019-12-17 19:53:05 -07006641 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006642 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006643 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006644 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006645 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006646 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006647 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006648 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006649 break;
6650 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006651 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006652 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006653 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006654 break;
6655 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006656 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006657 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006658 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006659 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006660 break;
6661 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006662 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006663 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006664 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006665 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006666 break;
6667 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006668 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006669 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006670 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006671 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006672 break;
6673 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006674 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006675 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006676 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006677 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006678 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006679 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006680 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006681 break;
6682 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006683 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006684 break;
6685 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006686 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006687 break;
6688 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006689 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006690 break;
6691 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006692 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006693 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006694 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006695 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006696 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006697 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006698 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006699 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006700 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006701 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006702 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006703 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006704 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006705 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006706 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006707 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006708 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006709 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006710 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006711 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006712 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006713 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006714 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006715 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006716 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006717 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006718 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006719 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006720 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006721 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006722 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006723 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006724 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006725 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006726 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006727 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006728 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006729 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006730 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006731 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006732 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006733 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006734 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006735 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006736 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006737 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006738 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006739 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006740 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006741 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006742 case IORING_OP_MKDIRAT:
6743 ret = io_mkdirat(req, issue_flags);
6744 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006745 case IORING_OP_SYMLINKAT:
6746 ret = io_symlinkat(req, issue_flags);
6747 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006748 case IORING_OP_LINKAT:
6749 ret = io_linkat(req, issue_flags);
6750 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006751 default:
6752 ret = -EINVAL;
6753 break;
6754 }
Jens Axboe31b51512019-01-18 22:56:34 -07006755
Jens Axboe5730b272021-02-27 15:57:30 -07006756 if (creds)
6757 revert_creds(creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006758 if (ret)
6759 return ret;
Jens Axboeb5325762020-05-19 21:20:27 -06006760 /* If the op doesn't have a file, we're not polling for it */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01006761 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6762 io_iopoll_req_issued(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006763
6764 return 0;
6765}
6766
Pavel Begunkovebc11b62021-08-09 13:04:05 +01006767static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6768{
6769 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6770
6771 req = io_put_req_find_next(req);
6772 return req ? &req->work : NULL;
6773}
6774
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006775static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006776{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006777 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006778 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006779 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006780
Pavel Begunkov48dcd382021-08-15 10:40:18 +01006781 /* one will be dropped by ->io_free_work() after returning to io-wq */
6782 if (!(req->flags & REQ_F_REFCOUNT))
6783 __io_req_set_refcount(req, 2);
6784 else
6785 req_ref_get(req);
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006786
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006787 timeout = io_prep_linked_timeout(req);
6788 if (timeout)
6789 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006790
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006791 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
Jens Axboe4014d942021-01-19 15:53:54 -07006792 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006793 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006794
Jens Axboe561fb042019-10-24 07:25:42 -06006795 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006796 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006797 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006798 /*
6799 * We can get EAGAIN for polled IO even though we're
6800 * forcing a sync submission from here, since we can't
6801 * wait for request slots on the block side.
6802 */
6803 if (ret != -EAGAIN)
6804 break;
6805 cond_resched();
6806 } while (1);
6807 }
Jens Axboe31b51512019-01-18 22:56:34 -07006808
Pavel Begunkova3df76982021-02-18 22:32:52 +00006809 /* avoid locking problems by failing it from a clean context */
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006810 if (ret)
Pavel Begunkova3df76982021-02-18 22:32:52 +00006811 io_req_task_queue_fail(req, ret);
Jens Axboe31b51512019-01-18 22:56:34 -07006812}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006813
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006814static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006815 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006816{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006817 return &table->files[i];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006818}
6819
Jens Axboe09bb8392019-03-13 12:39:28 -06006820static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6821 int index)
6822{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006823 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006824
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006825 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006826}
6827
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006828static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006829{
6830 unsigned long file_ptr = (unsigned long) file;
6831
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006832 if (__io_file_supports_nowait(file, READ))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006833 file_ptr |= FFS_ASYNC_READ;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006834 if (__io_file_supports_nowait(file, WRITE))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006835 file_ptr |= FFS_ASYNC_WRITE;
6836 if (S_ISREG(file_inode(file)->i_mode))
6837 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006838 file_slot->file_ptr = file_ptr;
Jens Axboe09bb8392019-03-13 12:39:28 -06006839}
6840
Pavel Begunkovac177052021-08-09 13:04:02 +01006841static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
6842 struct io_kiocb *req, int fd)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006843{
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006844 struct file *file;
Pavel Begunkovac177052021-08-09 13:04:02 +01006845 unsigned long file_ptr;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006846
Pavel Begunkovac177052021-08-09 13:04:02 +01006847 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6848 return NULL;
6849 fd = array_index_nospec(fd, ctx->nr_user_files);
6850 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6851 file = (struct file *) (file_ptr & FFS_MASK);
6852 file_ptr &= ~FFS_MASK;
6853 /* mask in overlapping REQ_F and FFS bits */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006854 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
Pavel Begunkovac177052021-08-09 13:04:02 +01006855 io_req_set_rsrc_node(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006856 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006857}
6858
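/*
 * Illustrative sketch, not part of this file: io_fixed_file_set() and
 * io_file_get_fixed() above stash per-file flag bits (FFS_*) in the low bits
 * of a pointer that is known to be suitably aligned, and mask them off again
 * on lookup. A minimal, generic version of that pointer-tagging idea:
 */
#if 0	/* example only */
#include <assert.h>
#include <stdint.h>

#define TAG_MASK	0x7UL	/* low three bits are free on 8-byte aligned pointers */

static inline uintptr_t tag_ptr(void *p, unsigned long tags)
{
	assert(((uintptr_t)p & TAG_MASK) == 0);
	return (uintptr_t)p | (tags & TAG_MASK);
}

static inline void *untag_ptr(uintptr_t v, unsigned long *tags)
{
	*tags = v & TAG_MASK;
	return (void *)(v & ~TAG_MASK);
}
#endif
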
Pavel Begunkovac177052021-08-09 13:04:02 +01006859static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006860 struct io_kiocb *req, int fd)
6861{
Pavel Begunkov62906e82021-08-10 14:52:47 +01006862 struct file *file = fget(fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01006863
6864 trace_io_uring_file_get(ctx, fd);
6865
6866 /* we don't allow fixed io_uring files */
6867 if (file && unlikely(file->f_op == &io_uring_fops))
6868 io_req_track_inflight(req);
6869 return file;
6870}
6871
6872static inline struct file *io_file_get(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006873 struct io_kiocb *req, int fd, bool fixed)
6874{
6875 if (fixed)
6876 return io_file_get_fixed(ctx, req, fd);
6877 else
Pavel Begunkov62906e82021-08-10 14:52:47 +01006878 return io_file_get_normal(ctx, req, fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01006879}
6880
Pavel Begunkovf237c302021-08-18 12:42:46 +01006881static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89b263f2021-08-10 15:14:18 -06006882{
6883 struct io_kiocb *prev = req->timeout.prev;
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006884 int ret;
Jens Axboe89b263f2021-08-10 15:14:18 -06006885
6886 if (prev) {
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006887 ret = io_try_cancel_userdata(req, prev->user_data);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006888 io_req_complete_post(req, ret ?: -ETIME, 0);
Jens Axboe89b263f2021-08-10 15:14:18 -06006889 io_put_req(prev);
Jens Axboe89b263f2021-08-10 15:14:18 -06006890 } else {
6891 io_req_complete_post(req, -ETIME, 0);
6892 }
6893}
6894
Jens Axboe2665abf2019-11-05 12:40:47 -07006895static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6896{
Jens Axboead8a48a2019-11-15 08:49:11 -07006897 struct io_timeout_data *data = container_of(timer,
6898 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006899 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006900 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006901 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006902
Jens Axboe89b263f2021-08-10 15:14:18 -06006903 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006904 prev = req->timeout.head;
6905 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006906
6907 /*
6908	 * We don't expect the list to be empty; that will only happen if we
6909 * race with the completion of the linked work.
6910 */
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006911 if (prev) {
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006912 io_remove_next_linked(prev);
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006913 if (!req_ref_inc_not_zero(prev))
6914 prev = NULL;
6915 }
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006916 list_del(&req->timeout.list);
Jens Axboe89b263f2021-08-10 15:14:18 -06006917 req->timeout.prev = prev;
6918 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Jens Axboe2665abf2019-11-05 12:40:47 -07006919
Jens Axboe89b263f2021-08-10 15:14:18 -06006920 req->io_task_work.func = io_req_task_link_timeout;
6921 io_req_task_work_add(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006922 return HRTIMER_NORESTART;
6923}
6924
Pavel Begunkovde968c12021-03-19 17:22:33 +00006925static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006926{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006927 struct io_ring_ctx *ctx = req->ctx;
6928
Jens Axboe89b263f2021-08-10 15:14:18 -06006929 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006930 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006931 * If the back reference is NULL, then our linked request finished
6932	 * before we got a chance to set up the timer.
Jens Axboe76a46e02019-11-10 23:34:16 -07006933 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006934 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006935 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006936
Jens Axboead8a48a2019-11-15 08:49:11 -07006937 data->timer.function = io_link_timeout_fn;
6938 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6939 data->mode);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006940 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
Jens Axboe2665abf2019-11-05 12:40:47 -07006941 }
Jens Axboe89b263f2021-08-10 15:14:18 -06006942 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006943 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006944 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006945}
6946
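/*
 * Illustrative userspace sketch, not part of this file: the linked-timeout
 * machinery above backs IORING_OP_LINK_TIMEOUT. If the timer fires first,
 * io_req_task_link_timeout() tries to cancel the linked request; otherwise
 * the timeout itself is cancelled when the linked request completes. Rough
 * liburing usage (helper names assumed), error handling omitted:
 */
#if 0	/* example only */
#include <liburing.h>

static void read_with_timeout(struct io_uring *ring, int fd,
			      void *buf, unsigned len)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* tie the timeout below to this read */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(ring);
}
#endif
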
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006947static void __io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006948 __must_hold(&req->ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006949{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006950 struct io_kiocb *linked_timeout;
Jens Axboee0c5c572019-03-12 10:18:47 -06006951 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006952
Olivier Langlois59b735a2021-06-22 05:17:39 -07006953issue_sqe:
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006954 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006955
6956 /*
6957 * We async punt it if the file wasn't marked NOWAIT, or if the file
6958 * doesn't support non-blocking read/write attempts
6959 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006960 if (likely(!ret)) {
Pavel Begunkove342c802021-01-19 13:32:47 +00006961 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006962 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006963 struct io_submit_state *state = &ctx->submit_state;
Jens Axboee65ef562019-03-12 10:16:44 -06006964
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006965 state->compl_reqs[state->compl_nr++] = req;
6966 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01006967 io_submit_flush_completions(ctx);
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006968 return;
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006969 }
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006970
6971 linked_timeout = io_prep_linked_timeout(req);
6972 if (linked_timeout)
6973 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov18400382021-03-19 17:22:34 +00006974 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006975 linked_timeout = io_prep_linked_timeout(req);
6976
Olivier Langlois59b735a2021-06-22 05:17:39 -07006977 switch (io_arm_poll_handler(req)) {
6978 case IO_APOLL_READY:
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006979 if (linked_timeout)
6980 io_unprep_linked_timeout(req);
Olivier Langlois59b735a2021-06-22 05:17:39 -07006981 goto issue_sqe;
6982 case IO_APOLL_ABORTED:
Pavel Begunkov18400382021-03-19 17:22:34 +00006983 /*
6984	 * Queued up for async execution; the worker will release the
6985	 * submit reference when the iocb is actually submitted.
6986 */
Pavel Begunkovf237c302021-08-18 12:42:46 +01006987 io_queue_async_work(req, NULL);
Olivier Langlois59b735a2021-06-22 05:17:39 -07006988 break;
Pavel Begunkov18400382021-03-19 17:22:34 +00006989 }
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006990
6991 if (linked_timeout)
6992 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006993 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006994 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006995 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006996}
6997
Pavel Begunkov441b8a72021-06-14 23:37:31 +01006998static inline void io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006999 __must_hold(&req->ctx->uring_lock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08007000{
Pavel Begunkov10c66902021-06-15 16:47:56 +01007001 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01007002 return;
Jackie Liu4fe2c962019-09-09 20:50:40 +08007003
Hao Xua8295b92021-08-27 17:46:09 +08007004 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00007005 __io_queue_sqe(req);
Hao Xua8295b92021-08-27 17:46:09 +08007006 } else if (req->flags & REQ_F_FAIL) {
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01007007 io_req_complete_fail_submit(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01007008 } else {
7009 int ret = io_req_prep_async(req);
7010
7011 if (unlikely(ret))
7012 io_req_complete_failed(req, ret);
7013 else
Pavel Begunkovf237c302021-08-18 12:42:46 +01007014 io_queue_async_work(req, NULL);
Jens Axboece35a472019-12-17 08:04:44 -07007015 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08007016}
7017
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007018/*
7019 * Check SQE restrictions (opcode and flags).
7020 *
7021 * Returns 'true' if SQE is allowed, 'false' otherwise.
7022 */
7023static inline bool io_check_restriction(struct io_ring_ctx *ctx,
7024 struct io_kiocb *req,
7025 unsigned int sqe_flags)
7026{
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01007027 if (likely(!ctx->restricted))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007028 return true;
7029
7030 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7031 return false;
7032
7033 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
7034 ctx->restrictions.sqe_flags_required)
7035 return false;
7036
7037 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
7038 ctx->restrictions.sqe_flags_required))
7039 return false;
7040
7041 return true;
7042}
7043
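/*
 * Illustrative userspace sketch, not part of this file: the restrictions
 * checked above are installed with IORING_REGISTER_RESTRICTIONS on a ring
 * created with IORING_SETUP_R_DISABLED, which is then enabled. Rough
 * liburing usage (helper names assumed), error handling omitted:
 */
#if 0	/* example only */
#include <liburing.h>

static int setup_restricted_ring(struct io_uring *ring)
{
	struct io_uring_restriction res[2] = {};
	int ret;

	ret = io_uring_queue_init(8, ring, IORING_SETUP_R_DISABLED);
	if (ret)
		return ret;

	/* only IORING_OP_NOP and IORING_OP_READ SQEs will pass the check above */
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_NOP;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_READ;

	ret = io_uring_register_restrictions(ring, res, 2);
	if (ret)
		return ret;
	return io_uring_enable_rings(ring);
}
#endif
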
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007044static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007045 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007046 __must_hold(&ctx->uring_lock)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007047{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007048 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007049 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07007050 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007051
Pavel Begunkov864ea922021-08-09 13:04:08 +01007052 /* req is partially pre-initialised, see io_preinit_req() */
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007053 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007054	/* same numerical values as the corresponding REQ_F_*, safe to copy */
7055 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007056 req->user_data = READ_ONCE(sqe->user_data);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007057 req->file = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007058 req->fixed_rsrc_refs = NULL;
Pavel Begunkov4dd28242020-06-15 10:33:13 +03007059 req->task = current;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007060
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007061 /* enforce forwards compatibility on users */
Pavel Begunkovdddca222021-04-27 16:13:52 +01007062 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007063 return -EINVAL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007064 if (unlikely(req->opcode >= IORING_OP_LAST))
7065 return -EINVAL;
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01007066 if (!io_check_restriction(ctx, req, sqe_flags))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007067 return -EACCES;
7068
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007069 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
7070 !io_op_defs[req->opcode].buffer_select)
7071 return -EOPNOTSUPP;
Pavel Begunkov3c199662021-06-15 16:47:57 +01007072 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
7073 ctx->drain_active = true;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007074
Jens Axboe003e8dc2021-03-06 09:22:27 -07007075 personality = READ_ONCE(sqe->personality);
7076 if (personality) {
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01007077 req->creds = xa_load(&ctx->personalities, personality);
7078 if (!req->creds)
Jens Axboe003e8dc2021-03-06 09:22:27 -07007079 return -EINVAL;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01007080 get_cred(req->creds);
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01007081 req->flags |= REQ_F_CREDS;
Jens Axboe003e8dc2021-03-06 09:22:27 -07007082 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007083 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007084
Jens Axboe27926b62020-10-28 09:33:23 -06007085 /*
7086 * Plug now if we have more than 1 IO left after this, and the target
7087 * is potentially a read/write to block based storage.
7088 */
7089 if (!state->plug_started && state->ios_left > 1 &&
7090 io_op_defs[req->opcode].plug) {
7091 blk_start_plug(&state->plug);
7092 state->plug_started = true;
7093 }
Jens Axboe63ff8222020-05-07 14:56:15 -06007094
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00007095 if (io_op_defs[req->opcode].needs_file) {
Pavel Begunkov62906e82021-08-10 14:52:47 +01007096 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
Pavel Begunkovac177052021-08-09 13:04:02 +01007097 (sqe_flags & IOSQE_FIXED_FILE));
Pavel Begunkovba13e232021-02-01 18:59:52 +00007098 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00007099 ret = -EBADF;
7100 }
7101
Pavel Begunkov71b547c2020-10-10 18:34:09 +01007102 state->ios_left--;
7103 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007104}
7105
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007106static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007107 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007108 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007109{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007110 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007111 int ret;
7112
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007113 ret = io_init_req(ctx, req, sqe);
7114 if (unlikely(ret)) {
7115fail_req:
Hao Xua8295b92021-08-27 17:46:09 +08007116 /* fail even hard links since we don't submit */
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007117 if (link->head) {
Hao Xua8295b92021-08-27 17:46:09 +08007118 /*
7119	 * We can tell whether a link req failed or was cancelled by
7120	 * checking REQ_F_FAIL, but the head is an exception: it may have
7121	 * REQ_F_FAIL set because of another request's failure rather than
7122	 * its own. Use req->result to distinguish whether the head failed
7123	 * on its own or because of another request, so that the correct
7124	 * return code can be set for it. Initialise result here to avoid
7125	 * affecting the normal path.
7126 */
7127 if (!(link->head->flags & REQ_F_FAIL))
7128 req_fail_link_node(link->head, -ECANCELED);
7129 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7130 /*
7131	 * The current req is a normal req; we should return an error and
7132	 * thus break the submission loop.
7133 */
7134 io_req_complete_failed(req, ret);
7135 return ret;
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007136 }
Hao Xua8295b92021-08-27 17:46:09 +08007137 req_fail_link_node(req, ret);
7138 } else {
7139 ret = io_req_prep(req, sqe);
7140 if (unlikely(ret))
7141 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007142 }
Pavel Begunkov441b8a72021-06-14 23:37:31 +01007143
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00007144 /* don't need @sqe from now on */
Olivier Langlois236daeae2021-05-31 02:36:37 -04007145 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
7146 req->flags, true,
7147 ctx->flags & IORING_SETUP_SQPOLL);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007148
Jens Axboe6c271ce2019-01-10 11:22:30 -07007149 /*
7150 * If we already have a head request, queue this one for async
7151 * submittal once the head completes. If we don't have a head but
7152 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7153 * submitted sync once the chain is complete. If none of those
7154 * conditions are true (normal request), then just queue it.
7155 */
7156 if (link->head) {
7157 struct io_kiocb *head = link->head;
7158
Hao Xua8295b92021-08-27 17:46:09 +08007159 if (!(req->flags & REQ_F_FAIL)) {
7160 ret = io_req_prep_async(req);
7161 if (unlikely(ret)) {
7162 req_fail_link_node(req, ret);
7163 if (!(head->flags & REQ_F_FAIL))
7164 req_fail_link_node(head, -ECANCELED);
7165 }
7166 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007167 trace_io_uring_link(ctx, req, head);
7168 link->last->link = req;
7169 link->last = req;
7170
7171 /* last request of a link, enqueue the link */
7172 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7173 link->head = NULL;
Pavel Begunkov5e159202021-06-14 23:37:26 +01007174 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007175 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08007176 } else {
Jens Axboe2b188cc2019-01-07 10:46:33 -07007177 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08007178 link->head = req;
7179 link->last = req;
7180 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00007181 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08007182 }
7183 }
7184
7185 return 0;
7186}
7187
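/*
 * Illustrative userspace sketch, not part of this file: the link->head /
 * link->last bookkeeping above is what builds SQE chains. A classic use is
 * ordering a write before an fsync with IOSQE_IO_LINK. Rough liburing usage
 * (helper names assumed), error handling omitted:
 */
#if 0	/* example only */
#include <liburing.h>

static void write_then_fsync(struct io_uring *ring, int fd,
			     const void *buf, unsigned len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* the fsync below runs only after the write */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fsync(sqe, fd, 0);

	io_uring_submit(ring);
}
#endif
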
7188/*
7189 * Batched submission is done; ensure local IO is flushed out.
7190 */
7191static void io_submit_state_end(struct io_submit_state *state,
7192 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03007193{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007194 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007195 io_queue_sqe(state->link.head);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01007196 if (state->compl_nr)
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01007197 io_submit_flush_completions(ctx);
Jackie Liua197f662019-11-08 08:09:12 -07007198 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007199 blk_finish_plug(&state->plug);
Jens Axboe9e645e112019-05-10 16:07:28 -06007200}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007201
Jens Axboe9e645e112019-05-10 16:07:28 -06007202/*
7203 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007204 */
Jens Axboe9e645e112019-05-10 16:07:28 -06007205static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03007206 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06007207{
7208 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07007209 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007210 /* set only head, no need to init link_last in advance */
7211 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07007212}
7213
Jens Axboe193155c2020-02-22 23:22:19 -07007214static void io_commit_sqring(struct io_ring_ctx *ctx)
7215{
Jens Axboe75c6a032020-01-28 10:15:23 -07007216 struct io_rings *rings = ctx->rings;
7217
7218 /*
Jens Axboe193155c2020-02-22 23:22:19 -07007219 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07007220 * since once we write the new head, the application could
7221 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03007222 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03007223 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07007224}
7225
Jens Axboe9e645e112019-05-10 16:07:28 -06007226/*
Fam Zhengdd9ae8a2021-06-04 17:42:56 +01007227 * Fetch an sqe, if one is available. Note this returns a pointer to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06007228 * that is mapped by userspace. This means that care needs to be taken to
7229 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07007230 * being a good citizen. If members of the sqe are validated and then later
7231 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03007232 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06007233 */
7234static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06007235{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01007236 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01007237 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06007238
7239 /*
7240 * The cached sq head (or cq tail) serves two purposes:
7241 *
7242	 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03007243	 * head.
Jens Axboe9e645e112019-05-10 16:07:28 -06007244 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03007245 * though the application is the one updating it.
7246 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01007247 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03007248 if (likely(head < ctx->sq_entries))
7249 return &ctx->sq_sqes[head];
7250
7251 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01007252 ctx->cq_extra--;
7253 WRITE_ONCE(ctx->rings->sq_dropped,
7254 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03007255 return NULL;
7256}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07007257
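/*
 * Illustrative sketch, not part of this file: io_get_sqe() above relies on
 * the SQ ring size being a power of two, so an ever-incrementing cached head
 * turns into an array index with a simple mask. Minimal standalone version:
 */
#if 0	/* example only */
#include <assert.h>

struct ring {
	unsigned entries;	/* must be a power of two */
	unsigned cached_head;	/* free-running counter, wraps naturally */
};

static unsigned ring_next_index(struct ring *r)
{
	assert((r->entries & (r->entries - 1)) == 0);
	return r->cached_head++ & (r->entries - 1);
}
#endif
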
Jens Axboe0f212202020-09-13 13:09:39 -06007258static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007259 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007260{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00007261 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007262
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03007263 /* make sure SQ entry isn't read before tail */
7264 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03007265 if (!percpu_ref_tryget_many(&ctx->refs, nr))
7266 return -EAGAIN;
Pavel Begunkov9a108672021-08-27 11:55:01 +01007267 io_get_task_refs(nr);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007268
Pavel Begunkovba88ff12021-02-10 00:03:11 +00007269 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkov46c4e162021-02-18 18:29:37 +00007270 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07007271 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03007272 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03007273
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007274 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03007275 if (unlikely(!req)) {
7276 if (!submitted)
7277 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03007278 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06007279 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00007280 sqe = io_get_sqe(ctx);
7281 if (unlikely(!sqe)) {
Hao Xu0c6e1d72021-08-26 01:58:56 +08007282 list_add(&req->inflight_entry, &ctx->submit_state.free_list);
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00007283 break;
7284 }
Jens Axboed3656342019-12-18 09:50:26 -07007285 /* will complete beyond this point, count as submitted */
7286 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007287 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07007288 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007289 }
7290
Pavel Begunkov9466f432020-01-25 22:34:01 +03007291 if (unlikely(submitted != nr)) {
7292 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06007293 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03007294
Pavel Begunkov09899b12021-06-14 02:36:22 +01007295 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06007296 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03007297 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007298
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007299 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03007300 /* Commit SQ ring head once we've consumed and submitted all SQEs */
7301 io_commit_sqring(ctx);
7302
Jens Axboe6c271ce2019-01-10 11:22:30 -07007303 return submitted;
7304}
7305
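/*
 * Illustrative userspace sketch, not part of this file: io_submit_sqes()
 * consumes up to 'nr' SQEs per call, which is what makes batched submission
 * cheap. An application typically queues several SQEs and pushes them with a
 * single enter. Rough liburing usage (helper names assumed):
 */
#if 0	/* example only */
#include <liburing.h>

static int submit_batch_of_nops(struct io_uring *ring, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		if (!sqe)
			break;
		io_uring_prep_nop(sqe);
	}
	/* one system call submits everything queued and waits for it */
	return io_uring_submit_and_wait(ring, i);
}
#endif
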
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007306static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
7307{
7308 return READ_ONCE(sqd->state);
7309}
7310
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007311static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
7312{
7313 /* Tell userspace we may need a wakeup call */
Jens Axboe79ebeae2021-08-10 15:18:27 -06007314 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07007315 WRITE_ONCE(ctx->rings->sq_flags,
7316 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007317 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007318}
7319
7320static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
7321{
Jens Axboe79ebeae2021-08-10 15:18:27 -06007322 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07007323 WRITE_ONCE(ctx->rings->sq_flags,
7324 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007325 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007326}
7327
Xiaoguang Wang08369242020-11-03 14:15:59 +08007328static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007329{
Jens Axboec8d1ba52020-09-14 11:07:26 -06007330 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08007331 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007332
Jens Axboec8d1ba52020-09-14 11:07:26 -06007333 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06007334 /* if we're handling multiple rings, cap submit size for fairness */
Olivier Langlois4ce8ad92021-06-23 11:50:18 -07007335 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
7336 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
Jens Axboee95eee22020-09-08 09:11:32 -06007337
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007338 if (!list_empty(&ctx->iopoll_list) || to_submit) {
7339 unsigned nr_events = 0;
Pavel Begunkov948e1942021-06-24 15:09:55 +01007340 const struct cred *creds = NULL;
7341
7342 if (ctx->sq_creds != current_cred())
7343 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007344
Xiaoguang Wang08369242020-11-03 14:15:59 +08007345 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007346 if (!list_empty(&ctx->iopoll_list))
Pavel Begunkova8576af2021-08-15 10:40:21 +01007347 io_do_iopoll(ctx, &nr_events, 0);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007348
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01007349 /*
7350	 * Don't submit if refs are dying; that is good for io_uring_register(),
7351	 * but it is also relied upon by io_ring_exit_work().
7352 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00007353 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
7354 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08007355 ret = io_submit_sqes(ctx, to_submit);
7356 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06007357
Pavel Begunkovacfb3812021-05-16 22:58:03 +01007358 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
7359 wake_up(&ctx->sqo_sq_wait);
Pavel Begunkov948e1942021-06-24 15:09:55 +01007360 if (creds)
7361 revert_creds(creds);
Pavel Begunkovacfb3812021-05-16 22:58:03 +01007362 }
Jens Axboe90554202020-09-03 12:12:41 -06007363
Xiaoguang Wang08369242020-11-03 14:15:59 +08007364 return ret;
7365}
7366
7367static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7368{
7369 struct io_ring_ctx *ctx;
7370 unsigned sq_thread_idle = 0;
7371
Pavel Begunkovc9dca272021-03-10 13:13:55 +00007372 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7373 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08007374 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06007375}
7376
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007377static bool io_sqd_handle_event(struct io_sq_data *sqd)
7378{
7379 bool did_sig = false;
7380 struct ksignal ksig;
7381
7382 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
7383 signal_pending(current)) {
7384 mutex_unlock(&sqd->lock);
7385 if (signal_pending(current))
7386 did_sig = get_signal(&ksig);
7387 cond_resched();
7388 mutex_lock(&sqd->lock);
7389 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007390 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7391}
7392
Jens Axboe6c271ce2019-01-10 11:22:30 -07007393static int io_sq_thread(void *data)
7394{
Jens Axboe69fb2132020-09-14 11:16:23 -06007395 struct io_sq_data *sqd = data;
7396 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08007397 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007398 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08007399 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007400
Pavel Begunkov696ee882021-04-01 09:55:04 +01007401 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007402 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06007403
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007404 if (sqd->sq_cpu != -1)
7405 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
7406 else
7407 set_cpus_allowed_ptr(current, cpu_online_mask);
7408 current->flags |= PF_NO_SETAFFINITY;
7409
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007410 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007411 while (1) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01007412 bool cap_entries, sqt_spin = false;
Jens Axboec1edbf52019-11-10 16:56:04 -07007413
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007414 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
7415 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01007416 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08007417 timeout = jiffies + sqd->sq_thread_idle;
7418 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007419
Jens Axboee95eee22020-09-08 09:11:32 -06007420 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06007421 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkov948e1942021-06-24 15:09:55 +01007422 int ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007423
Xiaoguang Wang08369242020-11-03 14:15:59 +08007424 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7425 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007426 }
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01007427 if (io_run_task_work())
7428 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007429
Xiaoguang Wang08369242020-11-03 14:15:59 +08007430 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06007431 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08007432 if (sqt_spin)
7433 timeout = jiffies + sqd->sq_thread_idle;
7434 continue;
7435 }
7436
Xiaoguang Wang08369242020-11-03 14:15:59 +08007437 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01007438 if (!io_sqd_events_pending(sqd) && !current->task_works) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01007439 bool needs_sched = true;
7440
Hao Xu724cb4f2021-04-21 23:19:11 +08007441 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01007442 io_ring_set_wakeup_flag(ctx);
7443
Hao Xu724cb4f2021-04-21 23:19:11 +08007444 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7445 !list_empty_careful(&ctx->iopoll_list)) {
7446 needs_sched = false;
7447 break;
7448 }
7449 if (io_sqring_entries(ctx)) {
7450 needs_sched = false;
7451 break;
7452 }
7453 }
7454
7455 if (needs_sched) {
7456 mutex_unlock(&sqd->lock);
7457 schedule();
7458 mutex_lock(&sqd->lock);
7459 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007460 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7461 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007462 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08007463
7464 finish_wait(&sqd->wait, &wait);
7465 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007466 }
7467
Pavel Begunkov78cc6872021-06-14 02:36:23 +01007468 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007469 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07007470 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07007471 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007472 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01007473 mutex_unlock(&sqd->lock);
7474
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007475 complete(&sqd->exited);
7476 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007477}
7478
Jens Axboebda52162019-09-24 13:47:15 -06007479struct io_wait_queue {
7480 struct wait_queue_entry wq;
7481 struct io_ring_ctx *ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007482 unsigned cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007483 unsigned nr_timeouts;
7484};
7485
Pavel Begunkov6c503152021-01-04 20:36:36 +00007486static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06007487{
7488 struct io_ring_ctx *ctx = iowq->ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007489 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007490
7491 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007492 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007493 * started waiting. For timeouts, we always want to return to userspace,
7494 * regardless of event count.
7495 */
Jens Axboe5fd46172021-08-06 14:04:31 -06007496 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
Jens Axboebda52162019-09-24 13:47:15 -06007497}
7498
7499static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7500 int wake_flags, void *key)
7501{
7502 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7503 wq);
7504
Pavel Begunkov6c503152021-01-04 20:36:36 +00007505 /*
7506	 * Cannot safely flush overflowed CQEs from here; ensure we wake up
7507 * the task, and the next invocation will do it.
7508 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007509 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
Pavel Begunkov6c503152021-01-04 20:36:36 +00007510 return autoremove_wake_function(curr, mode, wake_flags, key);
7511 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06007512}
7513
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007514static int io_run_task_work_sig(void)
7515{
7516 if (io_run_task_work())
7517 return 1;
7518 if (!signal_pending(current))
7519 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06007520 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06007521 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007522 return -EINTR;
7523}
7524
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007525/* when returns >0, the caller should retry */
7526static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7527 struct io_wait_queue *iowq,
7528 signed long *timeout)
7529{
7530 int ret;
7531
7532 /* make sure we run task_work before checking for signals */
7533 ret = io_run_task_work_sig();
7534 if (ret || io_should_wake(iowq))
7535 return ret;
7536 /* let the caller flush overflows, retry */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007537 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007538 return 1;
7539
7540 *timeout = schedule_timeout(*timeout);
7541 return !*timeout ? -ETIME : 1;
7542}
7543
Jens Axboe2b188cc2019-01-07 10:46:33 -07007544/*
7545 * Wait until events become available, if we don't already have some. The
7546 * application must reap them itself, as they reside on the shared cq ring.
7547 */
7548static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007549 const sigset_t __user *sig, size_t sigsz,
7550 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007551{
Pavel Begunkov902910992021-08-09 09:07:32 -06007552 struct io_wait_queue iowq;
Hristo Venev75b28af2019-08-26 17:23:46 +00007553 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007554 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7555 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007556
Jens Axboeb41e9852020-02-17 09:52:41 -07007557 do {
Pavel Begunkov90f67362021-08-09 20:18:12 +01007558 io_cqring_overflow_flush(ctx);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007559 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007560 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007561 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007562 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007563 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007564
Xiaoguang Wang44df58d2021-09-14 22:38:52 +08007565 if (uts) {
7566 struct timespec64 ts;
7567
7568 if (get_timespec64(&ts, uts))
7569 return -EFAULT;
7570 timeout = timespec64_to_jiffies(&ts);
7571 }
7572
Jens Axboe2b188cc2019-01-07 10:46:33 -07007573 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007574#ifdef CONFIG_COMPAT
7575 if (in_compat_syscall())
7576 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007577 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007578 else
7579#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007580 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007581
Jens Axboe2b188cc2019-01-07 10:46:33 -07007582 if (ret)
7583 return ret;
7584 }
7585
Pavel Begunkov902910992021-08-09 09:07:32 -06007586 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7587 iowq.wq.private = current;
7588 INIT_LIST_HEAD(&iowq.wq.entry);
7589 iowq.ctx = ctx;
Jens Axboebda52162019-09-24 13:47:15 -06007590 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Jens Axboe5fd46172021-08-06 14:04:31 -06007591 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
Pavel Begunkov902910992021-08-09 09:07:32 -06007592
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007593 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007594 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007595 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov90f67362021-08-09 20:18:12 +01007596 if (!io_cqring_overflow_flush(ctx)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007597 ret = -EBUSY;
7598 break;
7599 }
Pavel Begunkov311997b2021-06-14 23:37:28 +01007600 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
Jens Axboebda52162019-09-24 13:47:15 -06007601 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007602 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
Pavel Begunkov311997b2021-06-14 23:37:28 +01007603 finish_wait(&ctx->cq_wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007604 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007605 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007606
Jens Axboeb7db41c2020-07-04 08:55:50 -06007607 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007608
Hristo Venev75b28af2019-08-26 17:23:46 +00007609 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007610}
7611
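/*
 * Illustrative userspace sketch, not part of this file: io_cqring_wait()
 * above is reached via io_uring_enter() with IORING_ENTER_GETEVENTS, which
 * applications usually drive through liburing's wait helpers. Rough usage
 * with a timeout and no signal mask (helper names assumed):
 */
#if 0	/* example only */
#include <liburing.h>

static int reap_one(struct io_uring *ring)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_wait_cqes(ring, &cqe, 1, &ts, NULL);
	if (ret)
		return ret;	/* e.g. -ETIME if nothing completed in time */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
#endif
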
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007612static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007613{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007614 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007615
7616 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007617 kfree(table[i]);
7618 kfree(table);
7619}
7620
7621static void **io_alloc_page_table(size_t size)
7622{
7623 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7624 size_t init_size = size;
7625 void **table;
7626
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007627 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007628 if (!table)
7629 return NULL;
7630
7631 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov27f6b312021-06-15 13:20:13 +01007632 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007633
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007634 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007635 if (!table[i]) {
7636 io_free_page_table(table, init_size);
7637 return NULL;
7638 }
7639 size -= this_size;
7640 }
7641 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007642}
7643
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007644static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7645{
7646 percpu_ref_exit(&ref_node->refs);
7647 kfree(ref_node);
7648}
7649
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007650static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7651{
7652 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7653 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
7654 unsigned long flags;
7655 bool first_add = false;
7656
7657 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
7658 node->done = true;
7659
7660 while (!list_empty(&ctx->rsrc_ref_list)) {
7661 node = list_first_entry(&ctx->rsrc_ref_list,
7662 struct io_rsrc_node, node);
7663 /* recycle ref nodes in order */
7664 if (!node->done)
7665 break;
7666 list_del(&node->node);
7667 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
7668 }
7669 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
7670
7671 if (first_add)
7672 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
7673}
7674
7675static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
7676{
7677 struct io_rsrc_node *ref_node;
7678
7679 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7680 if (!ref_node)
7681 return NULL;
7682
7683 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7684 0, GFP_KERNEL)) {
7685 kfree(ref_node);
7686 return NULL;
7687 }
7688 INIT_LIST_HEAD(&ref_node->node);
7689 INIT_LIST_HEAD(&ref_node->rsrc_list);
7690 ref_node->done = false;
7691 return ref_node;
7692}
7693
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007694static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7695 struct io_rsrc_data *data_to_kill)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007696{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007697 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7698 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007699
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007700 if (data_to_kill) {
7701 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007702
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007703 rsrc_node->rsrc_data = data_to_kill;
Jens Axboe4956b9e2021-08-09 07:49:41 -06007704 spin_lock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007705 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
Jens Axboe4956b9e2021-08-09 07:49:41 -06007706 spin_unlock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007707
Pavel Begunkov3e942492021-04-11 01:46:34 +01007708 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007709 percpu_ref_kill(&rsrc_node->refs);
7710 ctx->rsrc_node = NULL;
7711 }
7712
7713 if (!ctx->rsrc_node) {
7714 ctx->rsrc_node = ctx->rsrc_backup_node;
7715 ctx->rsrc_backup_node = NULL;
7716 }
Pavel Begunkov1642b442020-12-30 21:34:14 +00007717}
7718
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007719static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007720{
7721 if (ctx->rsrc_backup_node)
7722 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007723 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007724 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7725}
7726
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007727static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007728{
7729 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007730
Pavel Begunkov215c3902021-04-01 15:43:48 +01007731	/* As we may drop ->uring_lock, another task may have started quiescing */
Hao Xu8bad28d2021-02-19 17:19:36 +08007732 if (data->quiesce)
7733 return -ENXIO;
7734
7735 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007736 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007737 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007738 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007739 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007740 io_rsrc_node_switch(ctx, data);
7741
Pavel Begunkov3e942492021-04-11 01:46:34 +01007742 /* kill initial ref, already quiesced if zero */
7743 if (atomic_dec_and_test(&data->refs))
7744 break;
Jens Axboec018db42021-08-09 08:15:50 -06007745 mutex_unlock(&ctx->uring_lock);
Hao Xu8bad28d2021-02-19 17:19:36 +08007746 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007747 ret = wait_for_completion_interruptible(&data->done);
Jens Axboec018db42021-08-09 08:15:50 -06007748 if (!ret) {
7749 mutex_lock(&ctx->uring_lock);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007750 break;
Jens Axboec018db42021-08-09 08:15:50 -06007751 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007752
Pavel Begunkov3e942492021-04-11 01:46:34 +01007753 atomic_inc(&data->refs);
7754 /* wait for all works potentially completing data->done */
7755 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007756 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007757
Hao Xu8bad28d2021-02-19 17:19:36 +08007758 ret = io_run_task_work_sig();
7759 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007760 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007761 data->quiesce = false;
7762
Hao Xu8bad28d2021-02-19 17:19:36 +08007763 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007764}
7765
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007766static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7767{
7768 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7769 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7770
7771 return &data->tags[table_idx][off];
7772}
7773
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007774static void io_rsrc_data_free(struct io_rsrc_data *data)
7775{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007776 size_t size = data->nr * sizeof(data->tags[0][0]);
7777
7778 if (data->tags)
7779 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007780 kfree(data);
7781}
7782
Pavel Begunkovd878c812021-06-14 02:36:18 +01007783static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7784 u64 __user *utags, unsigned nr,
7785 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007786{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007787 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007788 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007789 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007790
7791 data = kzalloc(sizeof(*data), GFP_KERNEL);
7792 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007793 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007794 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007795 if (!data->tags) {
7796 kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}

	atomic_set(&data->refs, 1);
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
{
	table->files = kvcalloc(nr_files, sizeof(table->files[0]),
				GFP_KERNEL_ACCOUNT);
	return !!table->files;
}

static void io_free_file_tables(struct io_file_table *table)
{
	kvfree(table->files);
	table->files = NULL;
}

static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#else
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file;

		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int ret;

	if (!ctx->file_data)
		return -ENXIO;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

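/*
 * Park the SQPOLL thread: raise park_pending, set the PARK bit and wake the
 * thread so it parks itself against sqd->lock, which the caller now holds
 * until io_sq_thread_unpark(). The counter lets several parkers stack;
 * unpark re-sets the bit if others are still waiting.
 */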
static void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

static void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

static void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

static void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (f.file->f_op != &io_uring_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

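/*
 * Find or create the io_sq_data for this ring: with IORING_SETUP_ATTACH_WQ we
 * try to share the SQPOLL thread of the ring behind p->wq_fd (same thread
 * group only), otherwise a fresh io_sq_data is allocated and the caller
 * spawns its thread later.
 */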
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

#if defined(CONFIG_UNIX)
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i, nr_files;

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;

	nr_files = 0;
	fpl->user = get_uid(current_user());
	for (i = 0; i < nr; i++) {
		struct file *file = io_file_from_index(ctx, i + offset);

		if (!file)
			continue;
		fpl->fp[nr_files] = get_file(file);
		unix_inflight(fpl->user, fpl->fp[nr_files]);
		nr_files++;
	}

	if (nr_files) {
		fpl->max = SCM_MAX_FD;
		fpl->count = nr_files;
		UNIXCB(skb).fp = fpl;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
		skb_queue_head(&sk->sk_receive_queue, skb);

		for (i = 0; i < nr_files; i++)
			fput(fpl->fp[i]);
	} else {
		kfree_skb(skb);
		kfree(fpl);
	}

	return 0;
}

/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		struct file *file = io_file_from_index(ctx, total);

		if (file)
			fput(file);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif

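/*
 * Drop a registered file. On CONFIG_UNIX kernels the file also has to be
 * pulled back out of the SCM_RIGHTS skbs queued on the ring socket, so walk
 * the receive queue, drop its unix_inflight accounting and compact the fp
 * array before the final fput().
 */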
static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
					left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}

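/*
 * Runs from the rsrc put workqueue once a node's references are gone: release
 * every queued resource, posting a CQE with the user-supplied tag when one
 * was attached at registration/update time, then drop the node's reference
 * on the parent rsrc data.
 */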
static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);

		if (prsrc->tag) {
			bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;

			io_ring_submit_lock(ctx, lock_ring);
			spin_lock(&ctx->completion_lock);
			io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
			ctx->cq_extra++;
			io_commit_cqring(ctx);
			spin_unlock(&ctx->completion_lock);
			io_cqring_ev_posted(ctx);
			io_ring_submit_unlock(ctx, lock_ring);
		}

		rsrc_data->do_put(ctx, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(ref_node);
	if (atomic_dec_and_test(&rsrc_data->refs))
		complete(&rsrc_data->done);
}

static void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct io_rsrc_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct io_rsrc_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}

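/*
 * Register an array of file descriptors as the ring's fixed file table.
 * Userspace reaches this through io_uring_register(2); a rough sketch of the
 * tagged variant, going by the uapi definitions rather than this file:
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= nr_fds,
 *		.data	= (__u64)(uintptr_t)fds,	// __s32 array, -1 = sparse slot
 *		.tags	= (__u64)(uintptr_t)tags,	// optional u64 tag per slot
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES2, &rr, sizeof(rr));
 */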
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	ret = -ENOMEM;
	if (!io_alloc_file_tables(&ctx->file_table, nr_args))
		goto out_free;

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto out_fput;
		}
		/* allow sparse sets */
		if (fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto out_fput;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto out_fput;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			goto out_fput;
		}
		io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
	}

	ret = io_sqe_files_scm(ctx);
	if (ret) {
		__io_sqe_files_unregister(ctx);
		return ret;
	}

	io_rsrc_node_switch(ctx, NULL);
	return ret;
out_fput:
	for (i = 0; i < ctx->nr_user_files; i++) {
		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
	io_free_file_tables(&ctx->file_table);
	ctx->nr_user_files = 0;
out_free:
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	return ret;
}

static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

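/*
 * Defer freeing a resource (file or buffer) until the current rsrc node's
 * references are gone: the resource and its tag are queued on the node and
 * released later by __io_rsrc_put_work().
 */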
static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
				 struct io_rsrc_node *node, void *rsrc)
{
	struct io_rsrc_put *prsrc;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->tag = *io_get_tag_slot(data, idx);
	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &node->rsrc_list);
	return 0;
}

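/*
 * Install a file directly into a fixed-file slot, used by request types that
 * can target a slot (e.g. accept/openat with a fixed file index). If the slot
 * is occupied, the old file is queued for removal and the rsrc node is
 * switched on exit so that removal can complete.
 */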
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
				 unsigned int issue_flags, u32 slot_index)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool needs_switch = false;
	struct io_fixed_file *file_slot;
	int ret = -EBADF;

	io_ring_submit_lock(ctx, !force_nonblock);
	if (file->f_op == &io_uring_fops)
		goto err;
	ret = -ENXIO;
	if (!ctx->file_data)
		goto err;
	ret = -EINVAL;
	if (slot_index >= ctx->nr_user_files)
		goto err;

	slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
	file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);

	if (file_slot->file_ptr) {
		struct file *old_file;

		ret = io_rsrc_node_switch_start(ctx);
		if (ret)
			goto err;

		old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
		ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
					    ctx->rsrc_node, old_file);
		if (ret)
			goto err;
		file_slot->file_ptr = 0;
		needs_switch = true;
	}

	*io_get_tag_slot(ctx->file_data, slot_index) = 0;
	io_fixed_file_set(file_slot, file);
	ret = io_sqe_file_register(ctx, file, slot_index);
	if (ret) {
		file_slot->file_ptr = 0;
		goto err;
	}

	ret = 0;
err:
	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->file_data);
	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret)
		fput(file);
	return ret;
}

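/*
 * IORING_REGISTER_FILES_UPDATE{,2}: replace entries of the fixed file table
 * in place. For each slot the old file (if any) is queued for removal and the
 * new fd installed; fd == -1 clears the slot, IORING_REGISTER_FILES_SKIP
 * leaves it untouched.
 */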
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, up->offset + done,
						    ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, up->offset + done) = tag;
			io_fixed_file_set(file_slot, file);
			err = io_sqe_file_register(ctx, file, i);
			if (err) {
				file_slot->file_ptr = 0;
				fput(file);
				break;
			}
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

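/*
 * Set up (or reuse) the io-wq instance that backs async offload for this
 * ring. The hash map is shared per-ctx so hashed work targeting the same
 * inode is serialised across workers; concurrency is bounded by the smaller
 * of the SQ size and 4 * num_online_cpus().
 */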
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

static int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_idle, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	spin_lock_init(&tctx->task_lock);
	INIT_WQ_LIST(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

static int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (f.file->f_op != &io_uring_fops) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

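/*
 * Charge pinned pages against RLIMIT_MEMLOCK. The cmpxchg loop makes the
 * check-and-add atomic against concurrent accounting on the same
 * user_struct without taking a lock.
 */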
static inline int __io_account_mem(struct user_struct *user,
				   unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
				     new_pages) != cur_pages);

	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY | __GFP_ACCOUNT;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}

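/*
 * Size of the shared rings allocation: the io_rings struct with the CQE
 * array inline, then (cache-line aligned on SMP) the SQ index array.
 * Roughly:
 *
 *	struct io_rings | cqes[cq_entries] | sq_array[sq_entries]
 *	                                    ^ *sq_offset
 *
 * SIZE_MAX is returned on arithmetic overflow.
 */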
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	if (sq_offset)
		*sq_offset = off;

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	return off;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

/*
 * Not super efficient, but this only runs at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search if
 * we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

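/*
 * Pin one user buffer and build its io_mapped_ubuf: pin the pages long-term,
 * reject file-backed mappings (shmem and hugetlb files excepted), account
 * the pinned pages against RLIMIT_MEMLOCK, and record the pages as a bvec
 * array for later fixed-buffer IO. A NULL iov_base maps to the shared
 * dummy_ubuf so sparse registrations stay cheap.
 */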
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	unsigned long off, start, end, ubuf;
	size_t size;
	int ret, pret, nr_pages, i;

	if (!iov->iov_base) {
		*pimu = ctx->dummy_ubuf;
		return 0;
	}

	ubuf = (unsigned long) iov->iov_base;
	end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	*pimu = NULL;
	ret = -ENOMEM;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			struct vm_area_struct *vma = vmas[i];

			if (vma_is_shmem(vma))
				continue;
			if (vma->vm_file &&
			    !is_file_hugepages(vma->vm_file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}

	ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, pret);
		goto done;
	}

	off = ubuf & ~PAGE_MASK;
	size = iov->iov_len;
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		imu->bvec[i].bv_page = pages[i];
		imu->bvec[i].bv_len = vec_len;
		imu->bvec[i].bv_offset = off;
		off = 0;
		size -= vec_len;
	}
	/* store original address for later verification */
	imu->ubuf = ubuf;
	imu->ubuf_end = ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	kvfree(vmas);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

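/*
 * IORING_REGISTER_BUFFERS{,2}: register an array of iovecs as fixed buffers
 * for READ_FIXED/WRITE_FIXED. A rough userspace sketch of the tagged
 * variant, going by the uapi definitions rather than this file:
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= nr_iovs,
 *		.data	= (__u64)(uintptr_t)iovs,	// struct iovec array
 *		.tags	= (__u64)(uintptr_t)tags,	// optional u64 tag per buffer
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS2, &rr, sizeof(rr));
 */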
static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			break;
		ret = io_buffer_validate(&iov);
		if (ret)
			break;
		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	else
		io_rsrc_node_switch(ctx, NULL);
	return ret;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, offset,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = NULL;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, offset) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);

		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}

static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_buffers, index, buf)
		__io_remove_buffers(ctx, buf, index, -1U);
}

static void io_req_cache_free(struct list_head *list)
{
	struct io_kiocb *req, *nxt;

	list_for_each_entry_safe(req, nxt, list, inflight_entry) {
		list_del(&req->inflight_entry);
		kmem_cache_free(req_cachep, req);
	}
}

static void io_req_caches_free(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	mutex_lock(&ctx->uring_lock);

	if (state->free_reqs) {
		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
		state->free_reqs = 0;
	}

	io_flush_cached_locked_reqs(ctx, state);
	io_req_cache_free(&state->free_list);
	mutex_unlock(&ctx->uring_lock);
}

static void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !atomic_dec_and_test(&data->refs))
		wait_for_completion(&data->done);
}

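/*
 * Final ring teardown, called once all references to the ctx are gone: stop
 * the SQPOLL thread, quiesce and free registered files/buffers, and release
 * the rings, socket and remaining per-ctx state.
 */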
Jens Axboe2b188cc2019-01-07 10:46:33 -07009207static void io_ring_ctx_free(struct io_ring_ctx *ctx)
9208{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07009209 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009210
Jens Axboe37d1e2e2021-02-17 21:03:43 -07009211 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06009212 mmdrop(ctx->mm_account);
9213 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07009214 }
Jens Axboedef596e2019-01-09 08:59:42 -07009215
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009216 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9217 io_wait_rsrc_data(ctx->buf_data);
9218 io_wait_rsrc_data(ctx->file_data);
9219
Hao Xu8bad28d2021-02-19 17:19:36 +08009220 mutex_lock(&ctx->uring_lock);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009221 if (ctx->buf_data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009222 __io_sqe_buffers_unregister(ctx);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009223 if (ctx->file_data)
Pavel Begunkov08480402021-04-13 02:58:38 +01009224 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01009225 if (ctx->rings)
9226 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08009227 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06009228 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07009229 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01009230 if (ctx->sq_creds)
9231 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07009232
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01009233 /* there are no registered resources left, nobody uses it */
9234 if (ctx->rsrc_node)
9235 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00009236 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01009237 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01009238 flush_delayed_work(&ctx->rsrc_put_work);
9239
9240 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
9241 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009242
9243#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07009244 if (ctx->ring_sock) {
9245 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07009246 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07009247 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009248#endif
Pavel Begunkovef9dd632021-08-28 19:54:38 -06009249 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009250
Hristo Venev75b28af2019-08-26 17:23:46 +00009251 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009252 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009253
9254 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009255 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07009256 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07009257 if (ctx->hash_map)
9258 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07009259 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01009260 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009261 kfree(ctx);
9262}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->poll_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (!io_sqring_full(ctx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	/*
	 * Don't flush the cqring overflow list here, just do a simple check.
	 * Otherwise there could possibly be an ABBA deadlock:
	 *      CPU0                    CPU1
	 *      ----                    ----
	 * lock(&ctx->uring_lock);
	 *                              lock(&ep->mtx);
	 *                              lock(&ctx->uring_lock);
	 * lock(&ep->mtx);
	 *
	 * Users may get EPOLLIN while seeing nothing in the cqring; this
	 * pushes them to do the flush.
	 */
	if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
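
/*
 * Illustrative sketch, not part of this file: how an application might
 * multiplex a ring fd with epoll, consuming the mask computed above.
 * ring_fd and epfd are assumptions for the example.
 *
 *	struct epoll_event ev = {
 *		.events = EPOLLIN,	// CQ entries (maybe) available
 *		.data.fd = ring_fd,
 *	};
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, ring_fd, &ev);
 *	// After epoll_wait() flags EPOLLIN, the app still has to reap the
 *	// CQ ring itself -- per the overflow comment above, EPOLLIN may be
 *	// reported while the CQ ring itself looks empty.
 */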

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *creds;

	creds = xa_erase(&ctx->personalities, id);
	if (creds) {
		put_cred(creds);
		return 0;
	}

	return -EINVAL;
}

struct io_tctx_exit {
	struct callback_head	task_work;
	struct completion	completion;
	struct io_ring_ctx	*ctx;
};

static void io_tctx_exit_cb(struct callback_head *cb)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_exit *work;

	work = container_of(cb, struct io_tctx_exit, task_work);
	/*
	 * When @in_idle, we're in cancellation and it's racy to remove the
	 * node. It'll be removed by the end of cancellation, just ignore it.
	 */
	if (!atomic_read(&tctx->in_idle))
		io_uring_del_tctx_node((unsigned long)work->ctx);
	complete(&work->completion);
}

static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->ctx == data;
}

static void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
	unsigned long timeout = jiffies + HZ * 60 * 5;
	unsigned long interval = HZ / 20;
	struct io_tctx_exit exit;
	struct io_tctx_node *node;
	int ret;

	/*
	 * If we're doing polled IO and end up having requests being
	 * submitted async (out-of-line), then completions can come in while
	 * we're waiting for refs to drop. We need to reap these manually,
	 * as nobody else will be looking for them.
	 */
	do {
		io_uring_try_cancel_requests(ctx, NULL, true);
		if (ctx->sq_data) {
			struct io_sq_data *sqd = ctx->sq_data;
			struct task_struct *tsk;

			io_sq_thread_park(sqd);
			tsk = sqd->thread;
			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
				io_wq_cancel_cb(tsk->io_uring->io_wq,
						io_cancel_ctx_cb, ctx, true);
			io_sq_thread_unpark(sqd);
		}

		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
			/* there is little hope left, don't run it too often */
			interval = HZ * 60;
		}
	} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));

	init_completion(&exit.completion);
	init_task_work(&exit.task_work, io_tctx_exit_cb);
	exit.ctx = ctx;
	/*
	 * Some may use the context even when all refs and requests have been
	 * put, and they are free to do so while still holding uring_lock or
	 * completion_lock, see io_req_task_submit(). Apart from other work,
	 * this lock/unlock section also waits for them to finish.
	 */
	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->tctx_list)) {
		WARN_ON_ONCE(time_after(jiffies, timeout));

		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
					ctx_node);
		/* don't spin on a single task if cancellation failed */
		list_rotate_left(&ctx->tctx_list);
		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
		if (WARN_ON_ONCE(ret))
			continue;
		wake_up_process(node->task);

		mutex_unlock(&ctx->uring_lock);
		wait_for_completion(&exit.completion);
		mutex_lock(&ctx->uring_lock);
	}
	mutex_unlock(&ctx->uring_lock);
	spin_lock(&ctx->completion_lock);
	spin_unlock(&ctx->completion_lock);

	io_ring_ctx_free(ctx);
}

/* Returns true if we found and killed one or more timeouts */
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_kiocb *req, *tmp;
	int canceled = 0;

	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		if (io_match_task(req, tsk, cancel_all)) {
			io_kill_timeout(req, -ECANCELED);
			canceled++;
		}
	}
	spin_unlock_irq(&ctx->timeout_lock);
	if (canceled != 0)
		io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	if (canceled != 0)
		io_cqring_ev_posted(ctx);
	return canceled != 0;
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	unsigned long index;
	struct creds *creds;

	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true);
	xa_for_each(&ctx->personalities, index, creds)
		io_unregister_personality(ctx, index);
	mutex_unlock(&ctx->uring_lock);

	io_kill_timeouts(ctx, NULL, true);
	io_poll_remove_all(ctx, NULL, true);

	/* if we failed setting up the ctx, we might not have any rings */
	io_iopoll_try_reap_events(ctx);

	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
	/*
	 * Use system_unbound_wq to avoid spawning tons of event kworkers
	 * if we're exiting a ton of rings at the same time. It just adds
	 * noise and overhead, there's no discernible change in runtime
	 * over using system_wq.
	 */
	queue_work(system_unbound_wq, &ctx->exit_work);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}

struct io_task_cancel {
	struct task_struct *task;
	bool all;
};

static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_task_cancel *cancel = data;
	bool ret;

	if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
		struct io_ring_ctx *ctx = req->ctx;

		/* protect against races with linked timeouts */
		spin_lock(&ctx->completion_lock);
		ret = io_match_task(req, cancel->task, cancel->all);
		spin_unlock(&ctx->completion_lock);
	} else {
		ret = io_match_task(req, cancel->task, cancel->all);
	}
	return ret;
}

static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
				  struct task_struct *task, bool cancel_all)
{
	struct io_defer_entry *de;
	LIST_HEAD(list);

	spin_lock(&ctx->completion_lock);
	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
		if (io_match_task(de->req, task, cancel_all)) {
			list_cut_position(&list, &ctx->defer_list, &de->list);
			break;
		}
	}
	spin_unlock(&ctx->completion_lock);
	if (list_empty(&list))
		return false;

	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct io_defer_entry, list);
		list_del_init(&de->list);
		io_req_complete_failed(de->req, -ECANCELED);
		kfree(de);
	}
	return true;
}

static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
{
	struct io_tctx_node *node;
	enum io_wq_cancel cret;
	bool ret = false;

	mutex_lock(&ctx->uring_lock);
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		/*
		 * io_wq will stay alive while we hold uring_lock, because it's
		 * killed after ctx nodes, which requires taking the lock.
		 */
		if (!tctx || !tctx->io_wq)
			continue;
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
	}
	mutex_unlock(&ctx->uring_lock);

	return ret;
}

static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all)
{
	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
	struct io_uring_task *tctx = task ? task->io_uring : NULL;

	while (1) {
		enum io_wq_cancel cret;
		bool ret = false;

		if (!task) {
			ret |= io_uring_try_cancel_iowq(ctx);
		} else if (tctx && tctx->io_wq) {
			/*
			 * Cancels requests of all rings, not only @ctx, but
			 * it's fine as the task is in exit/exec.
			 */
			cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
					       &cancel, true);
			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
		}

		/* SQPOLL thread does its own polling */
		if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
		    (ctx->sq_data && ctx->sq_data->thread == current)) {
			while (!list_empty_careful(&ctx->iopoll_list)) {
				io_iopoll_try_reap_events(ctx);
				ret = true;
			}
		}

		ret |= io_cancel_defer_files(ctx, task, cancel_all);
		ret |= io_poll_remove_all(ctx, task, cancel_all);
		ret |= io_kill_timeouts(ctx, task, cancel_all);
		if (task)
			ret |= io_run_task_work();
		if (!ret)
			break;
		cond_resched();
	}
}

static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;
		tctx = current->io_uring;
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
				      node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	tctx->last = ctx;
	return 0;
}

/*
 * Note that this task has used io_uring. We use it for cancellation purposes.
 */
static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (likely(tctx && tctx->last == ctx))
		return 0;
	return __io_uring_add_tctx_node(ctx);
}

/*
 * Remove this io_uring_file -> task mapping.
 */
static void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

static void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node)
		io_uring_del_tctx_node(index);
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
	if (tracked)
		return atomic_read(&tctx->inflight_tracked);
	return percpu_counter_sum(&tctx->inflight);
}

static void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

/*
 * Find any io_uring ctx that this task has registered or done IO on, and
 * cancel requests. @sqd should be non-NULL IFF it's an SQPOLL thread
 * cancellation.
 */
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_ring_ctx *ctx;
	s64 inflight;
	DEFINE_WAIT(wait);

	WARN_ON_ONCE(sqd && sqd->thread != current);

	if (!current->io_uring)
		return;
	if (tctx->io_wq)
		io_wq_exit_start(tctx->io_wq);

	atomic_inc(&tctx->in_idle);
	do {
		io_uring_drop_tctx_refs(current);
		/* read completions before cancellations */
		inflight = tctx_inflight(tctx, !cancel_all);
		if (!inflight)
			break;

		if (!sqd) {
			struct io_tctx_node *node;
			unsigned long index;

			xa_for_each(&tctx->xa, index, node) {
				/* sqpoll task will cancel all its requests */
				if (node->ctx->sq_data)
					continue;
				io_uring_try_cancel_requests(node->ctx, current,
							     cancel_all);
			}
		} else {
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				io_uring_try_cancel_requests(ctx, current,
							     cancel_all);
		}

		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
		io_uring_drop_tctx_refs(current);
		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx, !cancel_all))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);

	io_uring_clean_tctx(tctx);
	if (cancel_all) {
		/* for exec all current's requests should be gone, kill tctx */
		__io_uring_free(current);
	}
}

void __io_uring_cancel(bool cancel_all)
{
	io_uring_cancel_generic(cancel_all, NULL);
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */
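
/*
 * Illustrative sketch, not part of this file: the userspace side of the
 * mmap offsets validated above, using the sizes and offsets reported back
 * by io_uring_setup() in struct io_uring_params p (p and ring_fd are
 * assumptions here).
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  ring_fd, IORING_OFF_SQES);
 *	void *cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_CQ_RING);
 *
 * With IORING_FEAT_SINGLE_MMAP, the SQ and CQ rings may instead be one
 * mapping of max(sq_sz, cq_sz) at IORING_OFF_SQ_RING.
 */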

static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
	return 0;
}

static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
			  struct __kernel_timespec __user **ts,
			  const sigset_t __user **sig)
{
	struct io_uring_getevents_arg arg;

	/*
	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
	 * is just a pointer to the sigset_t.
	 */
	if (!(flags & IORING_ENTER_EXT_ARG)) {
		*sig = (const sigset_t __user *) argp;
		*ts = NULL;
		return 0;
	}

	/*
	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
	 * timespec and sigset_t pointers if good.
	 */
	if (*argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;
	*sig = u64_to_user_ptr(arg.sigmask);
	*argsz = arg.sigmask_sz;
	*ts = u64_to_user_ptr(arg.ts);
	return 0;
}
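
/*
 * Illustrative sketch, not part of this file: filling in the extended
 * argument consumed above, so a single wait can carry both a signal mask
 * and a timeout. The mask and timeout values are assumptions.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	sigset_t mask;
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(uintptr_t)&mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(uintptr_t)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */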

SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const void __user *, argp,
		size_t, argsz)
{
	struct io_ring_ctx *ctx;
	int submitted = 0;
	struct fd f;
	long ret;

	io_run_task_work();

	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (unlikely(f.file->f_op != &io_uring_fops))
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (unlikely(!percpu_ref_tryget(&ctx->refs)))
		goto out_fput;

	ret = -EBADFD;
	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		io_cqring_overflow_flush(ctx);

		if (unlikely(ctx->sq_data->thread == NULL)) {
			ret = -EOWNERDEAD;
			goto out;
		}
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT) {
			ret = io_sqpoll_wait_sq(ctx);
			if (ret)
				goto out;
		}
		submitted = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_tctx_node(ctx);
		if (unlikely(ret))
			goto out;
		mutex_lock(&ctx->uring_lock);
		submitted = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		const sigset_t __user *sig;
		struct __kernel_timespec __user *ts;

		ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
		if (unlikely(ret))
			goto out;

		min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
		 * space applications don't need to do io completion events
		 * polling again, they can rely on io_sq_thread to do polling
		 * work, which can reduce cpu usage and uring_lock contention.
		 */
		if (ctx->flags & IORING_SETUP_IOPOLL &&
		    !(ctx->flags & IORING_SETUP_SQPOLL)) {
			ret = io_iopoll_check(ctx, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
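
/*
 * Illustrative sketch, not part of this file: the common non-SQPOLL
 * calling pattern for the syscall above -- submit whatever is queued in
 * the SQ ring and block for at least one completion in one call. ring_fd
 * and to_submit are assumptions for the example.
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *	if (ret < 0)
 *		err(1, "io_uring_enter");
 *	// ret is the number of SQEs consumed; completions are reaped
 *	// directly from the mmap'ed CQ ring, not from the return value.
 *
 * With IORING_SETUP_SQPOLL, submission is the poller thread's job and the
 * app only passes IORING_ENTER_SQ_WAKEUP when that thread has gone idle.
 */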

#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(struct seq_file *m, unsigned int id,
			      const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	struct io_sq_data *sq = NULL;
	bool has_lock;
	int i;

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since fdinfo case grabs it in the opposite direction of normal use
	 * cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		sq = ctx->sq_data;
		if (!sq->thread)
			sq = NULL;
	}

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = io_file_from_index(ctx, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
		unsigned int len = buf->ubuf_end - buf->ubuf;

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}
	seq_printf(m, "PollList:\n");
	spin_lock(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list = &ctx->cancel_hash[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					req->task->task_works != NULL);
	}
	spin_unlock(&ctx->completion_lock);
	if (has_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif
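
/*
 * Illustrative sketch, not part of this file: with CONFIG_PROC_FS, the
 * output produced above can be read for any ring fd from a shell (pid and
 * fd number are assumptions):
 *
 *	$ cat /proc/1234/fdinfo/5
 *
 * which prints the SqThread/UserFiles/UserBufs/Personalities/PollList
 * fields emitted by __io_uring_show_fdinfo().
 */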

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
{
	int ret, fd;

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;

	ret = io_uring_add_tctx_node(ctx);
	if (ret) {
		put_unused_fd(fd);
		return ret;
	}
	fd_install(fd, file);
	return fd;
}

/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
	struct file *file;
#if defined(CONFIG_UNIX)
	int ret;

	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
			       &ctx->ring_sock);
	if (ret)
		return ERR_PTR(ret);
#endif

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
				  O_RDWR | O_CLOEXEC);
#if defined(CONFIG_UNIX)
	if (IS_ERR(file)) {
		sock_release(ctx->ring_sock);
		ctx->ring_sock = NULL;
	} else {
		ctx->ring_sock->file = file;
	}
#endif
	return file;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;
	ctx->compat = in_compat_syscall();
	if (!capable(CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;
	/* always set a rsrc node */
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		goto err;
	io_rsrc_node_switch(ctx, NULL);

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
		/* fput will clean it up */
		fput(file);
		return ret;
	}

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

/*
 * Sets up an aio uring context, and returns the fd. Applications ask for a
 * ring size, and we return the actual sq/cq ring sizes (among other things)
 * in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
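
/*
 * Illustrative sketch, not part of this file: minimal userspace setup
 * against the syscall above; the entry count is an assumption.
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *	if (ring_fd < 0)
 *		err(1, "io_uring_setup");
 *	// p.sq_off/p.cq_off now hold the ring offsets used by the mmap()
 *	// calls sketched after the CONFIG_MMU block above.
 */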

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
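
/*
 * Illustrative sketch, not part of this file: probing for opcode support
 * from userspace, mirroring the flag set above. The probe buffer must be
 * zeroed, as checked by memchr_inv() in io_probe().
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	if (probe->ops[IORING_OP_OPENAT2].flags & IO_URING_OP_SUPPORTED)
 *		; // openat2 is available on this kernel
 */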

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
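
/*
 * Illustrative sketch, not part of this file: snapshotting the current
 * credentials into a personality id, which later SQEs can reference via
 * sqe->personality after the task has dropped privileges.
 *
 *	// while still privileged:
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	...
 *	// and when it is no longer needed:
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_UNREGISTER_PERSONALITY, NULL, id);
 */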
10405
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010406static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
10407 unsigned int nr_args)
10408{
10409 struct io_uring_restriction *res;
10410 size_t size;
10411 int i, ret;
10412
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010413 /* Restrictions allowed only if rings started disabled */
10414 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10415 return -EBADFD;
10416
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010417 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010418 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010419 return -EBUSY;
10420
10421 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
10422 return -EINVAL;
10423
10424 size = array_size(nr_args, sizeof(*res));
10425 if (size == SIZE_MAX)
10426 return -EOVERFLOW;
10427
10428 res = memdup_user(arg, size);
10429 if (IS_ERR(res))
10430 return PTR_ERR(res);
10431
10432 ret = 0;
10433
10434 for (i = 0; i < nr_args; i++) {
10435 switch (res[i].opcode) {
10436 case IORING_RESTRICTION_REGISTER_OP:
10437 if (res[i].register_op >= IORING_REGISTER_LAST) {
10438 ret = -EINVAL;
10439 goto out;
10440 }
10441
10442 __set_bit(res[i].register_op,
10443 ctx->restrictions.register_op);
10444 break;
10445 case IORING_RESTRICTION_SQE_OP:
10446 if (res[i].sqe_op >= IORING_OP_LAST) {
10447 ret = -EINVAL;
10448 goto out;
10449 }
10450
10451 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
10452 break;
10453 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
10454 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
10455 break;
10456 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
10457 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
10458 break;
10459 default:
10460 ret = -EINVAL;
10461 goto out;
10462 }
10463 }
10464
10465out:
10466 /* Reset all restrictions if an error happened */
10467 if (ret != 0)
10468 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
10469 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010470 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010471
10472 kfree(res);
10473 return ret;
10474}
10475
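/*
 * Userspace sketch (not kernel code): restricting a ring that was created
 * with IORING_SETUP_R_DISABLED so that only READV/WRITEV SQEs are accepted.
 * Error handling elided.
 *
 *	struct io_uring_restriction res[2];
 *
 *	memset(res, 0, sizeof(res));
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[1].sqe_op = IORING_OP_WRITEV;
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 */
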
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010476static int io_register_enable_rings(struct io_ring_ctx *ctx)
10477{
10478 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10479 return -EBADFD;
10480
10481 if (ctx->restrictions.registered)
10482 ctx->restricted = 1;
10483
Pavel Begunkov0298ef92021-03-08 13:20:57 +000010484 ctx->flags &= ~IORING_SETUP_R_DISABLED;
10485 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
10486 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010487 return 0;
10488}
10489
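/*
 * Userspace sketch (not kernel code): the intended flow is to create the
 * ring disabled (IORING_SETUP_R_DISABLED), register restrictions, then
 * enable it, e.g. before handing the fd to a less trusted component:
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */
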
Pavel Begunkovfdecb662021-04-25 14:32:20 +010010490static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010491 struct io_uring_rsrc_update2 *up,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010492 unsigned nr_args)
10493{
10494 __u32 tmp;
10495 int err;
10496
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010497 if (up->resv)
10498 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010499 if (check_add_overflow(up->offset, nr_args, &tmp))
10500 return -EOVERFLOW;
10501 err = io_rsrc_node_switch_start(ctx);
10502 if (err)
10503 return err;
10504
Pavel Begunkovfdecb662021-04-25 14:32:20 +010010505 switch (type) {
10506 case IORING_RSRC_FILE:
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010507 return __io_sqe_files_update(ctx, up, nr_args);
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010508 case IORING_RSRC_BUFFER:
10509 return __io_sqe_buffers_update(ctx, up, nr_args);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010510 }
10511 return -EINVAL;
10512}
10513
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010514static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
10515 unsigned nr_args)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010516{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010517 struct io_uring_rsrc_update2 up;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010518
10519 if (!nr_args)
10520 return -EINVAL;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010521 memset(&up, 0, sizeof(up));
10522 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
10523 return -EFAULT;
10524 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
10525}
10526
10527static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +010010528 unsigned size, unsigned type)
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010529{
10530 struct io_uring_rsrc_update2 up;
10531
10532 if (size != sizeof(up))
10533 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010534 if (copy_from_user(&up, arg, sizeof(up)))
10535 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +010010536 if (!up.nr || up.resv)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010537 return -EINVAL;
Pavel Begunkov992da012021-06-10 16:37:37 +010010538 return __io_register_rsrc_update(ctx, type, &up, up.nr);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +010010539}
10540
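/*
 * Userspace sketch (not kernel code): replacing registered file slots 4..7
 * through the extended update interface. Note that nr_args carries the
 * struct size here, not a count; fd0..fd3 are hypothetical open
 * descriptors. Error handling elided.
 *
 *	struct io_uring_rsrc_update2 up;
 *	int fds[4] = { fd0, fd1, fd2, fd3 };
 *
 *	memset(&up, 0, sizeof(up));
 *	up.offset = 4;
 *	up.nr = 4;
 *	up.data = (unsigned long) fds;
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE2, &up, sizeof(up));
 */
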
Pavel Begunkov792e3582021-04-25 14:32:21 +010010541static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +010010542 unsigned int size, unsigned int type)
Pavel Begunkov792e3582021-04-25 14:32:21 +010010543{
10544 struct io_uring_rsrc_register rr;
10545
10546 /* keep it extendible */
10547 if (size != sizeof(rr))
10548 return -EINVAL;
10549
10550 memset(&rr, 0, sizeof(rr));
10551 if (copy_from_user(&rr, arg, size))
10552 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +010010553 if (!rr.nr || rr.resv || rr.resv2)
Pavel Begunkov792e3582021-04-25 14:32:21 +010010554 return -EINVAL;
10555
Pavel Begunkov992da012021-06-10 16:37:37 +010010556 switch (type) {
Pavel Begunkov792e3582021-04-25 14:32:21 +010010557 case IORING_RSRC_FILE:
10558 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10559 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010560 case IORING_RSRC_BUFFER:
10561 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
10562 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov792e3582021-04-25 14:32:21 +010010563 }
10564 return -EINVAL;
10565}
10566
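/*
 * Userspace sketch (not kernel code): registering a fixed file table via
 * the extendable _FILES2 interface with per-slot tags. When a tagged slot
 * is later replaced or removed and the old file is released, a CQE with
 * the tag as user_data is posted. fd0/fd1 are hypothetical open fds.
 *
 *	struct io_uring_rsrc_register rr;
 *	__u64 tags[2] = { 0xcafe, 0xbeef };	// 0 would mean "no tag"
 *	int fds[2] = { fd0, fd1 };
 *
 *	memset(&rr, 0, sizeof(rr));
 *	rr.nr = 2;
 *	rr.data = (unsigned long) fds;
 *	rr.tags = (unsigned long) tags;
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES2, &rr, sizeof(rr));
 */
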
Jens Axboefe764212021-06-17 10:19:54 -060010567static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
10568 unsigned len)
10569{
10570 struct io_uring_task *tctx = current->io_uring;
10571 cpumask_var_t new_mask;
10572 int ret;
10573
10574 if (!tctx || !tctx->io_wq)
10575 return -EINVAL;
10576
10577 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
10578 return -ENOMEM;
10579
10580 cpumask_clear(new_mask);
10581 if (len > cpumask_size())
10582 len = cpumask_size();
10583
10584 if (copy_from_user(new_mask, arg, len)) {
10585 free_cpumask_var(new_mask);
10586 return -EFAULT;
10587 }
10588
10589 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10590 free_cpumask_var(new_mask);
10591 return ret;
10592}
10593
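/*
 * Userspace sketch (not kernel code): pinning the calling task's io-wq
 * workers to CPUs 0 and 1. The kernel clamps the copy to its own
 * cpumask_size(), so passing sizeof(cpu_set_t) is safe.
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
 */
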
10594static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
10595{
10596 struct io_uring_task *tctx = current->io_uring;
10597
10598 if (!tctx || !tctx->io_wq)
10599 return -EINVAL;
10600
10601 return io_wq_cpu_affinity(tctx->io_wq, NULL);
10602}
10603
Jens Axboe2e480052021-08-27 11:33:19 -060010604static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
10605 void __user *arg)
10606{
Jens Axboefa846932021-09-01 14:15:59 -060010607 struct io_uring_task *tctx = NULL;
10608 struct io_sq_data *sqd = NULL;
Jens Axboe2e480052021-08-27 11:33:19 -060010609 __u32 new_count[2];
10610 int i, ret;
10611
Jens Axboe2e480052021-08-27 11:33:19 -060010612 if (copy_from_user(new_count, arg, sizeof(new_count)))
10613 return -EFAULT;
10614 for (i = 0; i < ARRAY_SIZE(new_count); i++)
10615 if (new_count[i] > INT_MAX)
10616 return -EINVAL;
10617
Jens Axboefa846932021-09-01 14:15:59 -060010618 if (ctx->flags & IORING_SETUP_SQPOLL) {
10619 sqd = ctx->sq_data;
10620 if (sqd) {
Jens Axboe009ad9f2021-09-08 19:07:26 -060010621 /*
10622 * Observe the correct sqd->lock -> ctx->uring_lock
10624			 * ordering. It's fine to drop uring_lock here, as we hold
10624 * a ref to the ctx.
10625 */
Jens Axboe41d3a6b2021-09-13 13:08:51 -060010626 refcount_inc(&sqd->refs);
Jens Axboe009ad9f2021-09-08 19:07:26 -060010627 mutex_unlock(&ctx->uring_lock);
Jens Axboefa846932021-09-01 14:15:59 -060010628 mutex_lock(&sqd->lock);
Jens Axboe009ad9f2021-09-08 19:07:26 -060010629 mutex_lock(&ctx->uring_lock);
Jens Axboe41d3a6b2021-09-13 13:08:51 -060010630 if (sqd->thread)
10631 tctx = sqd->thread->io_uring;
Jens Axboefa846932021-09-01 14:15:59 -060010632 }
10633 } else {
10634 tctx = current->io_uring;
10635 }
10636
10637 ret = -EINVAL;
10638 if (!tctx || !tctx->io_wq)
10639 goto err;
10640
Jens Axboe2e480052021-08-27 11:33:19 -060010641 ret = io_wq_max_workers(tctx->io_wq, new_count);
10642 if (ret)
Jens Axboefa846932021-09-01 14:15:59 -060010643 goto err;
10644
Jens Axboe41d3a6b2021-09-13 13:08:51 -060010645 if (sqd) {
Jens Axboefa846932021-09-01 14:15:59 -060010646 mutex_unlock(&sqd->lock);
Jens Axboe41d3a6b2021-09-13 13:08:51 -060010647 io_put_sq_data(sqd);
10648 }
Jens Axboe2e480052021-08-27 11:33:19 -060010649
10650 if (copy_to_user(arg, new_count, sizeof(new_count)))
10651 return -EFAULT;
10652
10653 return 0;
Jens Axboefa846932021-09-01 14:15:59 -060010654err:
Jens Axboe41d3a6b2021-09-13 13:08:51 -060010655 if (sqd) {
Jens Axboefa846932021-09-01 14:15:59 -060010656 mutex_unlock(&sqd->lock);
Jens Axboe41d3a6b2021-09-13 13:08:51 -060010657 io_put_sq_data(sqd);
10658 }
Jens Axboefa846932021-09-01 14:15:59 -060010659 return ret;
Jens Axboe2e480052021-08-27 11:33:19 -060010660}
10661
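/*
 * Userspace sketch (not kernel code): capping io-wq worker counts.
 * new_count[0] limits bounded workers, new_count[1] unbounded ones; a zero
 * entry leaves that limit untouched, and the previous limits are copied
 * back on return.
 *
 *	__u32 counts[2] = { 8, 64 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 *	// counts[] now holds the old limits
 */
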
Jens Axboe071698e2020-01-28 10:04:42 -070010662static bool io_register_op_must_quiesce(int op)
10663{
10664 switch (op) {
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +010010665 case IORING_REGISTER_BUFFERS:
10666 case IORING_UNREGISTER_BUFFERS:
Pavel Begunkovf4f7d212021-04-01 15:44:02 +010010667 case IORING_REGISTER_FILES:
Jens Axboe071698e2020-01-28 10:04:42 -070010668 case IORING_UNREGISTER_FILES:
10669 case IORING_REGISTER_FILES_UPDATE:
10670 case IORING_REGISTER_PROBE:
10671 case IORING_REGISTER_PERSONALITY:
10672 case IORING_UNREGISTER_PERSONALITY:
Pavel Begunkov992da012021-06-10 16:37:37 +010010673 case IORING_REGISTER_FILES2:
10674 case IORING_REGISTER_FILES_UPDATE2:
10675 case IORING_REGISTER_BUFFERS2:
10676 case IORING_REGISTER_BUFFERS_UPDATE:
Jens Axboefe764212021-06-17 10:19:54 -060010677 case IORING_REGISTER_IOWQ_AFF:
10678 case IORING_UNREGISTER_IOWQ_AFF:
Jens Axboe2e480052021-08-27 11:33:19 -060010679 case IORING_REGISTER_IOWQ_MAX_WORKERS:
Jens Axboe071698e2020-01-28 10:04:42 -070010680 return false;
10681 default:
10682 return true;
10683 }
10684}
10685
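/*
 * Opcodes for which the helper above returns false synchronize against the
 * request path on their own (typically via uring_lock and the rsrc node
 * machinery). Everything else, e.g. eventfd registration whose ->cq_ev_fd
 * is read locklessly at completion time, must first quiesce the ctx: kill
 * the percpu ref and wait for inflight requests to drain.
 */
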
Pavel Begunkove73c5c72021-08-09 13:04:12 +010010686static int io_ctx_quiesce(struct io_ring_ctx *ctx)
10687{
10688 long ret;
10689
10690 percpu_ref_kill(&ctx->refs);
10691
10692 /*
10693 * Drop uring mutex before waiting for references to exit. If another
10695	 * thread is currently inside io_uring_enter(), it might need to grab the
10695 * uring_lock to make progress. If we hold it here across the drain
10696 * wait, then we can deadlock. It's safe to drop the mutex here, since
10697 * no new references will come in after we've killed the percpu ref.
10698 */
10699 mutex_unlock(&ctx->uring_lock);
10700 do {
10701 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10702 if (!ret)
10703 break;
10704 ret = io_run_task_work_sig();
10705 } while (ret >= 0);
10706 mutex_lock(&ctx->uring_lock);
10707
10708 if (ret)
10709 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10710 return ret;
10711}
10712
Jens Axboeedafcce2019-01-09 09:16:05 -070010713static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10714 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -060010715 __releases(ctx->uring_lock)
10716 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -070010717{
10718 int ret;
10719
Jens Axboe35fa71a2019-04-22 10:23:23 -060010720 /*
10721	 * We're inside the ring mutex; if the ref is already dying, then
10722 * someone else killed the ctx or is already going through
10723 * io_uring_register().
10724 */
10725 if (percpu_ref_is_dying(&ctx->refs))
10726 return -ENXIO;
10727
Pavel Begunkov75c40212021-04-15 13:07:40 +010010728 if (ctx->restricted) {
10729 if (opcode >= IORING_REGISTER_LAST)
10730 return -EINVAL;
10731 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10732 if (!test_bit(opcode, ctx->restrictions.register_op))
10733 return -EACCES;
10734 }
10735
Jens Axboe071698e2020-01-28 10:04:42 -070010736 if (io_register_op_must_quiesce(opcode)) {
Pavel Begunkove73c5c72021-08-09 13:04:12 +010010737 ret = io_ctx_quiesce(ctx);
10738 if (ret)
Pavel Begunkovf70865d2021-04-11 01:46:40 +010010739 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -070010740 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010741
10742 switch (opcode) {
10743 case IORING_REGISTER_BUFFERS:
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010744 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -070010745 break;
10746 case IORING_UNREGISTER_BUFFERS:
10747 ret = -EINVAL;
10748 if (arg || nr_args)
10749 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010750 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -070010751 break;
Jens Axboe6b063142019-01-10 22:13:58 -070010752 case IORING_REGISTER_FILES:
Pavel Begunkov792e3582021-04-25 14:32:21 +010010753 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
Jens Axboe6b063142019-01-10 22:13:58 -070010754 break;
10755 case IORING_UNREGISTER_FILES:
10756 ret = -EINVAL;
10757 if (arg || nr_args)
10758 break;
10759 ret = io_sqe_files_unregister(ctx);
10760 break;
Jens Axboec3a31e62019-10-03 13:59:56 -060010761 case IORING_REGISTER_FILES_UPDATE:
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010762 ret = io_register_files_update(ctx, arg, nr_args);
Jens Axboec3a31e62019-10-03 13:59:56 -060010763 break;
Jens Axboe9b402842019-04-11 11:45:41 -060010764 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -070010765 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -060010766 ret = -EINVAL;
10767 if (nr_args != 1)
10768 break;
10769 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -070010770 if (ret)
10771 break;
10772 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10773 ctx->eventfd_async = 1;
10774 else
10775 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -060010776 break;
10777 case IORING_UNREGISTER_EVENTFD:
10778 ret = -EINVAL;
10779 if (arg || nr_args)
10780 break;
10781 ret = io_eventfd_unregister(ctx);
10782 break;
Jens Axboe66f4af92020-01-16 15:36:52 -070010783 case IORING_REGISTER_PROBE:
10784 ret = -EINVAL;
10785 if (!arg || nr_args > 256)
10786 break;
10787 ret = io_probe(ctx, arg, nr_args);
10788 break;
Jens Axboe071698e2020-01-28 10:04:42 -070010789 case IORING_REGISTER_PERSONALITY:
10790 ret = -EINVAL;
10791 if (arg || nr_args)
10792 break;
10793 ret = io_register_personality(ctx);
10794 break;
10795 case IORING_UNREGISTER_PERSONALITY:
10796 ret = -EINVAL;
10797 if (arg)
10798 break;
10799 ret = io_unregister_personality(ctx, nr_args);
10800 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010801 case IORING_REGISTER_ENABLE_RINGS:
10802 ret = -EINVAL;
10803 if (arg || nr_args)
10804 break;
10805 ret = io_register_enable_rings(ctx);
10806 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010807 case IORING_REGISTER_RESTRICTIONS:
10808 ret = io_register_restrictions(ctx, arg, nr_args);
10809 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010810 case IORING_REGISTER_FILES2:
10811 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
Pavel Begunkov792e3582021-04-25 14:32:21 +010010812 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010813 case IORING_REGISTER_FILES_UPDATE2:
10814 ret = io_register_rsrc_update(ctx, arg, nr_args,
10815 IORING_RSRC_FILE);
10816 break;
10817 case IORING_REGISTER_BUFFERS2:
10818 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
10819 break;
10820 case IORING_REGISTER_BUFFERS_UPDATE:
10821 ret = io_register_rsrc_update(ctx, arg, nr_args,
10822 IORING_RSRC_BUFFER);
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010823 break;
Jens Axboefe764212021-06-17 10:19:54 -060010824 case IORING_REGISTER_IOWQ_AFF:
10825 ret = -EINVAL;
10826 if (!arg || !nr_args)
10827 break;
10828 ret = io_register_iowq_aff(ctx, arg, nr_args);
10829 break;
10830 case IORING_UNREGISTER_IOWQ_AFF:
10831 ret = -EINVAL;
10832 if (arg || nr_args)
10833 break;
10834 ret = io_unregister_iowq_aff(ctx);
10835 break;
Jens Axboe2e480052021-08-27 11:33:19 -060010836 case IORING_REGISTER_IOWQ_MAX_WORKERS:
10837 ret = -EINVAL;
10838 if (!arg || nr_args != 2)
10839 break;
10840 ret = io_register_iowq_max_workers(ctx, arg);
10841 break;
Jens Axboeedafcce2019-01-09 09:16:05 -070010842 default:
10843 ret = -EINVAL;
10844 break;
10845 }
10846
Jens Axboe071698e2020-01-28 10:04:42 -070010847 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010848 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -070010849 percpu_ref_reinit(&ctx->refs);
Jens Axboe0f158b42020-05-14 17:18:39 -060010850 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010851 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010852 return ret;
10853}
10854
10855SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10856 void __user *, arg, unsigned int, nr_args)
10857{
10858 struct io_ring_ctx *ctx;
10859 long ret = -EBADF;
10860 struct fd f;
10861
10862 f = fdget(fd);
10863 if (!f.file)
10864 return -EBADF;
10865
10866 ret = -EOPNOTSUPP;
10867 if (f.file->f_op != &io_uring_fops)
10868 goto out_fput;
10869
10870 ctx = f.file->private_data;
10871
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +000010872 io_run_task_work();
10873
Jens Axboeedafcce2019-01-09 09:16:05 -070010874 mutex_lock(&ctx->uring_lock);
10875 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10876 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010877 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10878 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010879out_fput:
10880 fdput(f);
10881 return ret;
10882}
10883
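/*
 * Userspace sketch (not kernel code): libc does not provide a wrapper for
 * the syscall entered above, so callers typically go through liburing or a
 * raw syscall(2) shim such as:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int sys_io_uring_register(int fd, unsigned opcode,
 *					 const void *arg, unsigned nr_args)
 *	{
 *		return (int) syscall(__NR_io_uring_register, fd, opcode,
 *				     arg, nr_args);
 *	}
 */
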
Jens Axboe2b188cc2019-01-07 10:46:33 -070010884static int __init io_uring_init(void)
10885{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010886#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10887 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10888 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10889} while (0)
10890
10891#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10892 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10893 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10894 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10895 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10896 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10897 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10898 BUILD_BUG_SQE_ELEM(8, __u64, off);
10899 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10900 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010901 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010902 BUILD_BUG_SQE_ELEM(24, __u32, len);
10903 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10904 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10905 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10906 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010907 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10908 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010909 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10910 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10911 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10912 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10913 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10914 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10915 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10916 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010917 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010918 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10919 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010920 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010921 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010922 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Pavel Begunkovb9445592021-08-25 12:25:45 +010010923 BUILD_BUG_SQE_ELEM(44, __u32, file_index);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010924
Pavel Begunkovb0d658ec2021-04-27 16:13:53 +010010925 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10926 sizeof(struct io_uring_rsrc_update));
10927 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10928 sizeof(struct io_uring_rsrc_update2));
Pavel Begunkov90499ad2021-08-25 20:51:40 +010010929
10930 /* ->buf_index is u16 */
10931 BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
10932
Pavel Begunkovb0d658ec2021-04-27 16:13:53 +010010933 /* should fit into one byte */
10934 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10935
Jens Axboed3656342019-12-18 09:50:26 -070010936 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Hao Xu32c2d332021-09-07 11:22:43 +080010937 BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010938
Jens Axboe91f245d2021-02-09 13:48:50 -070010939 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10940 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010941 return 0;
10942};
10943__initcall(io_uring_init);