// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * in between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * on data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
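
/*
 * A minimal userspace sketch (not kernel code) of the CQ-side rules in
 * the note above. The names cq_head, cq_tail, cq_mask and cqes are
 * assumed pointers into the mmap'ed CQ ring, and handle() is a
 * placeholder; liburing implements the equivalent logic. The tail is
 * read with a load-acquire and the new head is published with a
 * store-release, as recommended:
 *
 *	unsigned head = *cq_head;
 *
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle(cqe);		// consume the completion
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 */
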
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
			 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
			 IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32 sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
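
/*
 * A minimal userspace sketch (not kernel code) of how these fields are
 * located: io_uring_setup(2) fills struct io_uring_params (called p
 * here) with the member offsets, and the application mmaps the ring
 * and adds them to the base address. ring_fd and the local pointer
 * names are placeholders:
 *
 *	void *sq_ptr = mmap(NULL,
 *			    p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *
 *	unsigned *sq_head = (unsigned *)((char *)sq_ptr + p.sq_off.head);
 *	unsigned *sq_tail = (unsigned *)((char *)sq_ptr + p.sq_off.tail);
 *	unsigned *sq_mask = (unsigned *)((char *)sq_ptr + p.sq_off.ring_mask);
 */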

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK = 1,
	IO_URING_F_COMPLETE_DEFER = 2,
};

struct io_mapped_ubuf {
	u64 ubuf;
	u64 ubuf_end;
	unsigned int nr_bvecs;
	unsigned long acct_pages;
	struct bio_vec bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref refs;
	struct list_head node;
	struct list_head rsrc_list;
	struct io_rsrc_data *rsrc_data;
	struct llist_node llist;
	bool done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx *ctx;

	u64 **tags;
	unsigned int nr;
	rsrc_put_fn *do_put;
	atomic_t refs;
	struct completion done;
	bool quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t refs;
	atomic_t park_pending;
	struct mutex lock;

	/* ctx's that are using this sqd */
	struct list_head ctx_list;

	struct task_struct *thread;
	struct wait_queue_head wait;

	unsigned sq_thread_idle;
	int sq_cpu;
	pid_t task_pid;
	pid_t task_tgid;

	unsigned long state;
	struct completion exited;
};

#define IO_COMPL_BATCH		32
#define IO_REQ_CACHE_SIZE	32
#define IO_REQ_ALLOC_BATCH	8

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	struct blk_plug plug;
	struct io_submit_link link;

	/*
	 * io_kiocb alloc cache
	 */
	void *reqs[IO_REQ_CACHE_SIZE];
	unsigned int free_reqs;

	bool plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_kiocb *compl_reqs[IO_COMPL_BATCH];
	unsigned int compl_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head free_list;

	unsigned int ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref refs;

		struct io_rings *rings;
		unsigned int flags;
		unsigned int compat: 1;
		unsigned int drain_next: 1;
		unsigned int eventfd_async: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;
		struct list_head defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		struct io_file_table file_table;
		unsigned nr_user_files;
		unsigned nr_user_bufs;
		struct io_mapped_ubuf **user_bufs;

		struct io_submit_state submit_state;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		struct list_head cq_overflow_list;
		struct xarray io_buffers;
		struct xarray personalities;
		u32 pers_next;
		unsigned sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head locked_free_list;
	unsigned int locked_free_nr;

	const struct cred *sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned long check_cq_overflow;

	struct {
		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct eventfd_ctx *cq_ev_fd;
		struct wait_queue_head poll_wait;
		struct wait_queue_head cq_wait;
		unsigned cq_extra;
		atomic_t cq_timeouts;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		spinlock_t timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head iopoll_list;
		struct hlist_head *cancel_hash;
		unsigned cancel_hash_bits;
		bool poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node *rsrc_backup_node;
		struct io_mapped_ubuf *dummy_ubuf;
		struct io_rsrc_data *file_data;
		struct io_rsrc_data *buf_data;

		struct delayed_work rsrc_put_work;
		struct llist_head rsrc_put_llist;
		struct list_head rsrc_ref_list;
		spinlock_t rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket *ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash *hash_map;

		/* Only used for accounting purposes */
		struct user_struct *user;
		struct mm_struct *mm_account;

		/* ctx exit and cancelation */
		struct llist_head fallback_llist;
		struct delayed_work fallback_work;
		struct work_struct exit_work;
		struct list_head tctx_list;
		struct completion ref_comp;
		u32 iowq_limits[2];
		bool iowq_limits_set;
	};
};
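
/*
 * A minimal userspace sketch (not kernel code) of the sq_array
 * indirection documented in the submission block above. The names
 * sqes, sq_array, sq_tail and sq_mask are assumed pointers into the
 * mmap'ed SQ ring, fill_sqe() is a placeholder, and the ring-full
 * check against the SQ head (a load-acquire) is omitted for brevity.
 * With the usual 1:1 mapping, slot i of sq_array simply names SQE i,
 * and the store-release on the tail publishes both writes:
 *
 *	unsigned tail = *sq_tail;
 *	unsigned index = tail & *sq_mask;
 *
 *	fill_sqe(&sqes[index]);
 *	sq_array[index] = index;
 *	smp_store_release(sq_tail, tail + 1);
 */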

struct io_uring_task {
	/* submission side */
	int cached_refs;
	struct xarray xa;
	struct wait_queue_head wait;
	const struct io_ring_ctx *last;
	struct io_wq *io_wq;
	struct percpu_counter inflight;
	atomic_t inflight_tracked;
	atomic_t in_idle;

	spinlock_t task_lock;
	struct io_wq_work_list task_list;
	struct callback_head task_work;
	bool task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	struct wait_queue_head *head;
	__poll_t events;
	struct wait_queue_entry wait;
};

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_close {
	struct file *file;
	int fd;
	u32 file_slot;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
	u32 flags;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u32 off;
	u32 target_seq;
	struct list_head list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb *head;
	/* for linked completions */
	struct io_kiocb *prev;
};

struct io_timeout_rem {
	struct file *file;
	u64 addr;

	/* timeout update */
	struct timespec64 ts;
	u32 flags;
	bool ltimeout;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	u32 file_slot;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_rsrc_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	int splice_fd_in;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__u32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_statx {
	struct file *file;
	int dfd;
	unsigned int mask;
	unsigned int flags;
	const char __user *filename;
	struct statx __user *buffer;
};

struct io_shutdown {
	struct file *file;
	int how;
};

struct io_rename {
	struct file *file;
	int old_dfd;
	int new_dfd;
	struct filename *oldpath;
	struct filename *newpath;
	int flags;
};

struct io_unlink {
	struct file *file;
	int dfd;
	int flags;
	struct filename *filename;
};

struct io_mkdir {
	struct file *file;
	int dfd;
	umode_t mode;
	struct filename *filename;
};

struct io_symlink {
	struct file *file;
	int new_dfd;
	struct filename *oldpath;
	struct filename *newpath;
};

struct io_hardlink {
	struct file *file;
	int old_dfd;
	int new_dfd;
	struct filename *oldpath;
	struct filename *newpath;
	int flags;
};

struct io_completion {
	struct file *file;
	u32 cflags;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec *free_iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	const struct iovec *free_iovec;
	struct iov_iter iter;
	struct iov_iter_state iter_state;
	size_t bytes_done;
	struct wait_page_queue wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
};

struct async_poll {
	struct io_poll_iocb poll;
	struct io_poll_iocb *double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node node;
		struct llist_node fallback_node;
	};
	io_req_tw_func_t func;
};

enum {
	IORING_RSRC_FILE	= 0,
	IORING_RSRC_BUFFER	= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file *file;
		struct io_rw rw;
		struct io_poll_iocb poll;
		struct io_poll_update poll_update;
		struct io_accept accept;
		struct io_sync sync;
		struct io_cancel cancel;
		struct io_timeout timeout;
		struct io_timeout_rem timeout_rem;
		struct io_connect connect;
		struct io_sr_msg sr_msg;
		struct io_open open;
		struct io_close close;
		struct io_rsrc_update rsrc_update;
		struct io_fadvise fadvise;
		struct io_madvise madvise;
		struct io_epoll epoll;
		struct io_splice splice;
		struct io_provide_buf pbuf;
		struct io_statx statx;
		struct io_shutdown shutdown;
		struct io_rename rename;
		struct io_unlink unlink;
		struct io_mkdir mkdir;
		struct io_symlink symlink;
		struct io_hardlink hardlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;

	u16 buf_index;
	u32 result;

	struct io_ring_ctx *ctx;
	unsigned int flags;
	atomic_t refs;
	struct task_struct *task;
	u64 user_data;

	struct io_kiocb *link;
	struct percpu_ref *fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head inflight_entry;
	struct io_task_work io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	struct async_poll *apoll;
	struct io_wq_work work;
	const struct cred *creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf *imu;
	/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
	struct io_buffer *kbuf;
	atomic_t poll_refs;
};

struct io_tctx_node {
	struct list_head ctx_node;
	struct task_struct *task;
	struct io_ring_ctx *ctx;
};

struct io_defer_entry {
	struct list_head list;
	struct io_kiocb *req;
	u32 seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned pollin : 1;
	unsigned pollout : 1;
	/* op supports buffer selection */
	unsigned buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned needs_async_setup : 1;
	/* should block plug */
	unsigned plug : 1;
	/* size of async data needed, if any */
	unsigned short async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
	[IORING_OP_MKDIRAT] = {},
	[IORING_OP_SYMLINKAT] = {},
	[IORING_OP_LINKAT] = {},
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);

static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);

static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static int io_req_prep_async(struct io_kiocb *req);

static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
				 unsigned int issue_flags, u32 slot_index);
static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
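 *
 * (Added note: the unsigned comparison below is true for any refcount in
 * the range [-127, 0], so it catches both a ref that has already dropped
 * to zero and one that has underflowed past it.)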
1139 */
1140#define req_ref_zero_or_close_to_overflow(req) \
1141 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1142
1143static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1144{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001145 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001146 return atomic_inc_not_zero(&req->refs);
1147}
1148
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001149static inline bool req_ref_put_and_test(struct io_kiocb *req)
1150{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001151 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1152 return true;
1153
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001154 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1155 return atomic_dec_and_test(&req->refs);
1156}
1157
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001158static inline void req_ref_get(struct io_kiocb *req)
1159{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001160 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001161 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1162 atomic_inc(&req->refs);
1163}
1164
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001165static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001166{
1167 if (!(req->flags & REQ_F_REFCOUNT)) {
1168 req->flags |= REQ_F_REFCOUNT;
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001169 atomic_set(&req->refs, nr);
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001170 }
1171}
1172
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001173static inline void io_req_set_refcount(struct io_kiocb *req)
1174{
1175 __io_req_set_refcount(req, 1);
1176}
1177
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01001178static inline void io_req_set_rsrc_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001179{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001180 struct io_ring_ctx *ctx = req->ctx;
1181
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001182 if (!req->fixed_rsrc_refs) {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01001183 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001184 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001185 }
1186}
1187
Pavel Begunkovf70865d2021-04-11 01:46:40 +01001188static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1189{
1190 bool got = percpu_ref_tryget(ref);
1191
1192 /* already at zero, wait for ->release() */
1193 if (!got)
1194 wait_for_completion(compl);
1195 percpu_ref_resurrect(ref);
1196 if (got)
1197 percpu_ref_put(ref);
1198}
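/*
 * Usage sketch (names illustrative): a quiesce path that killed the ref
 * and then needs to back out would do:
 *
 *	percpu_ref_kill(&data->refs);
 *	...
 *	io_refs_resurrect(&data->refs, &data->done);
 *
 * The tryget distinguishes a ref that is merely dying from one whose
 * ->release() has already run; in the latter case we must wait for the
 * completion before resurrecting.
 */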
1199
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001200static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1201 bool cancel_all)
Pavel Begunkov1c939a52021-11-26 14:38:15 +00001202 __must_hold(&req->ctx->timeout_lock)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001203{
1204 struct io_kiocb *req;
1205
Pavel Begunkov68207682021-03-22 01:58:25 +00001206 if (task && head->task != task)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001207 return false;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001208 if (cancel_all)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001209 return true;
1210
1211 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001212 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001213 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001214 }
1215 return false;
1216}
1217
Pavel Begunkov1c939a52021-11-26 14:38:15 +00001218static bool io_match_linked(struct io_kiocb *head)
1219{
1220 struct io_kiocb *req;
1221
1222 io_for_each_link(req, head) {
1223 if (req->flags & REQ_F_INFLIGHT)
1224 return true;
1225 }
1226 return false;
1227}
1228
1229/*
1230 * As io_match_task() but protected against racing with linked timeouts.
1231 * User must not hold timeout_lock.
1232 */
1233static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1234 bool cancel_all)
1235{
1236 bool matched;
1237
1238 if (task && head->task != task)
1239 return false;
1240 if (cancel_all)
1241 return true;
1242
1243 if (head->flags & REQ_F_LINK_TIMEOUT) {
1244 struct io_ring_ctx *ctx = head->ctx;
1245
1246 /* protect against races with linked timeouts */
1247 spin_lock_irq(&ctx->timeout_lock);
1248 matched = io_match_linked(head);
1249 spin_unlock_irq(&ctx->timeout_lock);
1250 } else {
1251 matched = io_match_linked(head);
1252 }
1253 return matched;
1254}
1255
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001256static inline void req_set_fail(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001257{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001258 req->flags |= REQ_F_FAIL;
Jens Axboec40f6372020-06-25 15:39:59 -06001259}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001260
Hao Xua8295b92021-08-27 17:46:09 +08001261static inline void req_fail_link_node(struct io_kiocb *req, int res)
1262{
1263 req_set_fail(req);
1264 req->result = res;
1265}
1266
Jens Axboe2b188cc2019-01-07 10:46:33 -07001267static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1268{
1269 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1270
Jens Axboe0f158b42020-05-14 17:18:39 -06001271 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001272}
1273
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001274static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1275{
1276 return !req->timeout.off;
1277}
1278
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001279static void io_fallback_req_func(struct work_struct *work)
1280{
1281 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1282 fallback_work.work);
1283 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1284 struct io_kiocb *req, *tmp;
Pavel Begunkovf237c302021-08-18 12:42:46 +01001285 bool locked = false;
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001286
1287 percpu_ref_get(&ctx->refs);
1288 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
Pavel Begunkovf237c302021-08-18 12:42:46 +01001289 req->io_task_work.func(req, &locked);
Pavel Begunkov5636c002021-08-18 12:42:45 +01001290
Pavel Begunkovf237c302021-08-18 12:42:46 +01001291 if (locked) {
1292 if (ctx->submit_state.compl_nr)
1293 io_submit_flush_completions(ctx);
1294 mutex_unlock(&ctx->uring_lock);
1295 }
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001296 percpu_ref_put(&ctx->refs);
Pavel Begunkovf237c302021-08-18 12:42:46 +01001297
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001298}
1299
Jens Axboe2b188cc2019-01-07 10:46:33 -07001300static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1301{
1302 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001303 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001304
1305 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1306 if (!ctx)
1307 return NULL;
1308
Jens Axboe78076bb2019-12-04 19:56:40 -07001309 /*
1310	 * Use 5 bits less than the max cq entries; that should give us around
1311 * 32 entries per hash list if totally full and uniformly spread.
1312 */
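	/*
	 * Worked example (illustrative): p->cq_entries == 4096 gives
	 * ilog2() == 12 and hash_bits == 7, i.e. 128 hash buckets; a
	 * completely full ring spreads to 4096 / 128 == 32 entries per
	 * bucket, matching the estimate above.
	 */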
1313 hash_bits = ilog2(p->cq_entries);
1314 hash_bits -= 5;
1315 if (hash_bits <= 0)
1316 hash_bits = 1;
1317 ctx->cancel_hash_bits = hash_bits;
1318 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1319 GFP_KERNEL);
1320 if (!ctx->cancel_hash)
1321 goto err;
1322 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1323
Pavel Begunkov62248432021-04-28 13:11:29 +01001324 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1325 if (!ctx->dummy_ubuf)
1326 goto err;
1327	/* set an invalid range, so io_import_fixed() fails when it encounters it */
1328 ctx->dummy_ubuf->ubuf = -1UL;
1329
Roman Gushchin21482892019-05-07 10:01:48 -07001330 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001331 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1332 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001333
1334 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001335 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001336 INIT_LIST_HEAD(&ctx->sqd_list);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001337 init_waitqueue_head(&ctx->poll_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001338 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001339 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001340 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001341 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001342 mutex_init(&ctx->uring_lock);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001343 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001344 spin_lock_init(&ctx->completion_lock);
Jens Axboe89850fc2021-08-10 15:11:51 -06001345 spin_lock_init(&ctx->timeout_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001346 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001347 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001348 INIT_LIST_HEAD(&ctx->timeout_list);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06001349 INIT_LIST_HEAD(&ctx->ltimeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001350 spin_lock_init(&ctx->rsrc_ref_lock);
1351 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001352 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1353 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001354 INIT_LIST_HEAD(&ctx->tctx_list);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001355 INIT_LIST_HEAD(&ctx->submit_state.free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001356 INIT_LIST_HEAD(&ctx->locked_free_list);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01001357 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001358 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001359err:
Pavel Begunkov62248432021-04-28 13:11:29 +01001360 kfree(ctx->dummy_ubuf);
Jens Axboe78076bb2019-12-04 19:56:40 -07001361 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001362 kfree(ctx);
1363 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001364}
1365
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001366static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1367{
1368 struct io_rings *r = ctx->rings;
1369
1370 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1371 ctx->cq_extra--;
1372}
1373
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001374static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001375{
Jens Axboe2bc99302020-07-09 09:43:27 -06001376 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1377 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001378
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001379 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001380 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001381
Bob Liu9d858b22019-11-13 18:06:25 +08001382 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001383}
1384
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01001385#define FFS_ASYNC_READ 0x1UL
1386#define FFS_ASYNC_WRITE 0x2UL
1387#ifdef CONFIG_64BIT
1388#define FFS_ISREG 0x4UL
1389#else
1390#define FFS_ISREG 0x0UL
1391#endif
1392#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
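/*
 * Illustrative sketch: the FFS_* bits ride in the low, always-zero bits of
 * a stashed file pointer, so packing and unpacking look like:
 *
 *	unsigned long file_ptr = (unsigned long)file | FFS_ASYNC_READ;
 *	struct file *f = (struct file *)(file_ptr & FFS_MASK);
 *
 * FFS_MASK clears the capability hints and recovers the original pointer.
 */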
1393
1394static inline bool io_req_ffs_set(struct io_kiocb *req)
1395{
1396 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1397}
1398
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001399static void io_req_track_inflight(struct io_kiocb *req)
1400{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001401 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001402 req->flags |= REQ_F_INFLIGHT;
Jens Axboe3746d622022-06-23 11:06:43 -06001403 atomic_inc(&req->task->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001404 }
1405}
1406
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001407static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1408{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01001409 if (WARN_ON_ONCE(!req->link))
1410 return NULL;
1411
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001412 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1413 req->flags |= REQ_F_LINK_TIMEOUT;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001414
1415 /* linked timeouts should have two refs once prep'ed */
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001416 io_req_set_refcount(req);
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001417 __io_req_set_refcount(req->link, 2);
1418 return req->link;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001419}
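/*
 * The two refs plausibly cover the two paths that can put the linked
 * timeout: the hrtimer callback and the normal completion path; the
 * request is freed only once both have dropped their reference.
 */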
1420
1421static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1422{
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001423 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001424 return NULL;
1425 return __io_prep_linked_timeout(req);
1426}
1427
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001428static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001429{
Jens Axboed3656342019-12-18 09:50:26 -07001430 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001431 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001432
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001433 if (!(req->flags & REQ_F_CREDS)) {
1434 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001435 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001436 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001437
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001438 req->work.list.next = NULL;
1439 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001440 if (req->flags & REQ_F_FORCE_ASYNC)
1441 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1442
Jens Axboed3656342019-12-18 09:50:26 -07001443 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001444 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001445 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001446 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001447 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001448 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001449 }
Jens Axboe561fb042019-10-24 07:25:42 -06001450}
1451
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001452static void io_prep_async_link(struct io_kiocb *req)
1453{
1454 struct io_kiocb *cur;
1455
Pavel Begunkov44eff402021-07-26 14:14:31 +01001456 if (req->flags & REQ_F_LINK_TIMEOUT) {
1457 struct io_ring_ctx *ctx = req->ctx;
1458
Pavel Begunkov09eb40f2021-11-23 01:45:35 +00001459 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001460 io_for_each_link(cur, req)
1461 io_prep_async_work(cur);
Pavel Begunkov09eb40f2021-11-23 01:45:35 +00001462 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001463 } else {
1464 io_for_each_link(cur, req)
1465 io_prep_async_work(cur);
1466 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001467}
1468
Pavel Begunkovf237c302021-08-18 12:42:46 +01001469static void io_queue_async_work(struct io_kiocb *req, bool *locked)
Jens Axboe561fb042019-10-24 07:25:42 -06001470{
Jackie Liua197f662019-11-08 08:09:12 -07001471 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001472 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001473 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001474
Pavel Begunkovf237c302021-08-18 12:42:46 +01001475	/* must not take the lock; NULL it as a precaution */
1476 locked = NULL;
1477
Jens Axboe3bfe6102021-02-16 14:15:30 -07001478 BUG_ON(!tctx);
1479 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001480
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001481 /* init ->work of the whole link before punting */
1482 io_prep_async_link(req);
Jens Axboe991468d2021-07-23 11:53:54 -06001483
1484 /*
1485 * Not expected to happen, but if we do have a bug where this _can_
1486 * happen, catch it here and ensure the request is marked as
1487 * canceled. That will make io-wq go through the usual work cancel
1488 * procedure rather than attempt to run this request (or create a new
1489 * worker for it).
1490 */
1491 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1492 req->work.flags |= IO_WQ_WORK_CANCEL;
1493
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001494 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1495 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001496 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001497 if (link)
1498 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001499}
1500
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001501static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001502 __must_hold(&req->ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06001503 __must_hold(&req->ctx->timeout_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001504{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001505 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001506
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001507 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov2ae2eb92021-09-09 13:56:27 +01001508 if (status)
1509 req_set_fail(req);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001510 atomic_set(&req->ctx->cq_timeouts,
1511 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001512 list_del_init(&req->timeout.list);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001513 io_fill_cqe_req(req, status, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001514 io_put_req_deferred(req);
Jens Axboe5262f562019-09-17 12:26:57 -06001515 }
1516}
1517
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001518static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001519{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001520 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001521 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1522 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001523
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001524 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001525 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001526 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001527 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001528 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001529 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001530}
1531
Pavel Begunkov360428f2020-05-30 14:54:17 +03001532static void io_flush_timeouts(struct io_ring_ctx *ctx)
Jens Axboe89850fc2021-08-10 15:11:51 -06001533 __must_hold(&ctx->completion_lock)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001534{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001535 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Jens Axboeba7261a2022-04-08 11:08:58 -06001536 struct io_kiocb *req, *tmp;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001537
Jens Axboe79ebeae2021-08-10 15:18:27 -06001538 spin_lock_irq(&ctx->timeout_lock);
Jens Axboeba7261a2022-04-08 11:08:58 -06001539 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001540 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001541
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001542 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001543 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001544
1545 /*
1546 * Since seq can easily wrap around over time, subtract
1547 * the last seq at which timeouts were flushed before comparing.
1548 * Assuming not more than 2^31-1 events have happened since,
1549 * these subtractions won't have wrapped, so we can check if
1550 * target is in [last_seq, current_seq] by comparing the two.
1551 */
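		/*
		 * Worked example (illustrative, u32 arithmetic): with
		 * cq_last_tm_flush == 0xfffffff0, target_seq == 0x5 and
		 * seq == 0x10, events_needed == 0x15 and events_got == 0x20,
		 * so the timeout fires correctly despite the wrap.
		 */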
1552 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1553 events_got = seq - ctx->cq_last_tm_flush;
1554 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001555 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001556
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001557 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001558 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001559 ctx->cq_last_tm_flush = seq;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001560 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001561}
1562
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001563static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001564{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001565 if (ctx->off_timeout_used)
1566 io_flush_timeouts(ctx);
1567 if (ctx->drain_active)
1568 io_queue_deferred(ctx);
1569}
1570
1571static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1572{
1573 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1574 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001575 /* order cqe stores with ring update */
1576 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001577}
1578
Jens Axboe90554202020-09-03 12:12:41 -06001579static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1580{
1581 struct io_rings *r = ctx->rings;
1582
Pavel Begunkova566c552021-05-16 22:58:08 +01001583 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001584}
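/*
 * Illustrative aside: the unsigned subtraction stays correct across index
 * wrap-around; with sq_entries == 8, a cached head of 0xfffffffc and a
 * tail of 0x4 give 0x4 - 0xfffffffc == 8, i.e. the ring is full.
 */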
1585
Pavel Begunkov888aae22021-01-19 13:32:39 +00001586static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1587{
1588 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1589}
1590
Pavel Begunkovd068b502021-05-16 22:58:11 +01001591static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001592{
Hristo Venev75b28af2019-08-26 17:23:46 +00001593 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001594 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001595
Stefan Bühler115e12e2019-04-24 23:54:18 +02001596 /*
1597 * writes to the cq entry need to come after reading head; the
1598 * control dependency is enough as we're using WRITE_ONCE to
1599 * fill the cq entry
1600 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001601 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001602 return NULL;
1603
Pavel Begunkov888aae22021-01-19 13:32:39 +00001604 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001605 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001606}
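/*
 * Illustrative aside: the tail is masked rather than reset, so with
 * cq_entries == 8 (mask 7) a cached_cq_tail of 10 selects slot 10 & 7 == 2
 * while the free-running tail keeps counting up.
 */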
1607
Jens Axboef2842ab2020-01-08 11:04:00 -07001608static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1609{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001610 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001611 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001612 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1613 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001614 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001615}
1616
Jens Axboe2c5d7632021-08-21 07:21:19 -06001617/*
1618 * This should only get called when at least one event has been posted.
1619 * Some applications rely on the eventfd notification count only changing
1620 * IFF a new CQE has been added to the CQ ring. There's no dependency on
1621 * a 1:1 relationship between how many times this function is called (and
1622 * hence the eventfd count) and the number of CQEs posted to the CQ ring.
1623 */
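/*
 * Userspace sketch (liburing names assumed, handle_cqe() hypothetical):
 * after read(2) on the registered eventfd returns a count n, drain the CQ
 * ring until empty rather than reaping exactly n CQEs:
 *
 *	uint64_t n;
 *	read(evfd, &n, sizeof(n));
 *	while (io_uring_peek_cqe(&ring, &cqe) == 0) {
 *		handle_cqe(cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */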
Jens Axboeb41e9852020-02-17 09:52:41 -07001624static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001625{
Jens Axboe5fd46172021-08-06 14:04:31 -06001626 /*
1627 * wake_up_all() may seem excessive, but io_wake_function() and
1628 * io_should_wake() handle the termination of the loop and only
1629 * wake as many waiters as we need to.
1630 */
1631 if (wq_has_sleeper(&ctx->cq_wait))
1632 wake_up_all(&ctx->cq_wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001633 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1634 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001635 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001636 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov3f008382021-10-01 10:39:33 +01001637 if (waitqueue_active(&ctx->poll_wait))
Pavel Begunkov311997b2021-06-14 23:37:28 +01001638 wake_up_interruptible(&ctx->poll_wait);
Jens Axboe8c838782019-03-12 15:48:16 -06001639}
1640
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001641static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1642{
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001643 /* see waitqueue_active() comment */
1644 smp_mb();
1645
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001646 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001647 if (waitqueue_active(&ctx->cq_wait))
Jens Axboe5fd46172021-08-06 14:04:31 -06001648 wake_up_all(&ctx->cq_wait);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001649 }
1650 if (io_should_trigger_evfd(ctx))
1651 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov3f008382021-10-01 10:39:33 +01001652 if (waitqueue_active(&ctx->poll_wait))
Pavel Begunkov311997b2021-06-14 23:37:28 +01001653 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001654}
1655
Jens Axboec4a2ed72019-11-21 21:01:26 -07001656/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001657static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001658{
Jens Axboeb18032b2021-01-24 16:58:56 -07001659 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001660
Pavel Begunkova566c552021-05-16 22:58:08 +01001661 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001662 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001663
Jens Axboeb18032b2021-01-24 16:58:56 -07001664 posted = false;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001665 spin_lock(&ctx->completion_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001666 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001667 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001668 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001669
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001670 if (!cqe && !force)
1671 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001672 ocqe = list_first_entry(&ctx->cq_overflow_list,
1673 struct io_overflow_cqe, list);
1674 if (cqe)
1675 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1676 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001677 io_account_cq_overflow(ctx);
1678
Jens Axboeb18032b2021-01-24 16:58:56 -07001679 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001680 list_del(&ocqe->list);
1681 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001682 }
1683
Pavel Begunkov09e88402020-12-17 00:24:38 +00001684 all_flushed = list_empty(&ctx->cq_overflow_list);
1685 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001686 clear_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001687 WRITE_ONCE(ctx->rings->sq_flags,
1688 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001689 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001690
Jens Axboeb18032b2021-01-24 16:58:56 -07001691 if (posted)
1692 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001693 spin_unlock(&ctx->completion_lock);
Jens Axboeb18032b2021-01-24 16:58:56 -07001694 if (posted)
1695 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001696 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001697}
1698
Pavel Begunkov90f67362021-08-09 20:18:12 +01001699static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001700{
Jens Axboeca0a2652021-03-04 17:15:48 -07001701 bool ret = true;
1702
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001703 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001704 /* iopoll syncs against uring_lock, not completion_lock */
1705 if (ctx->flags & IORING_SETUP_IOPOLL)
1706 mutex_lock(&ctx->uring_lock);
Pavel Begunkov90f67362021-08-09 20:18:12 +01001707 ret = __io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001708 if (ctx->flags & IORING_SETUP_IOPOLL)
1709 mutex_unlock(&ctx->uring_lock);
1710 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001711
1712 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001713}
1714
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001715/* must be called shortly after putting a request */
1716static inline void io_put_task(struct task_struct *task, int nr)
1717{
1718 struct io_uring_task *tctx = task->io_uring;
1719
Pavel Begunkove98e49b2021-08-18 17:01:43 +01001720 if (likely(task == current)) {
1721 tctx->cached_refs += nr;
1722 } else {
1723 percpu_counter_sub(&tctx->inflight, nr);
1724 if (unlikely(atomic_read(&tctx->in_idle)))
1725 wake_up(&tctx->wait);
1726 put_task_struct_many(task, nr);
1727 }
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001728}
1729
Pavel Begunkov9a108672021-08-27 11:55:01 +01001730static void io_task_refs_refill(struct io_uring_task *tctx)
1731{
1732 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
1733
1734 percpu_counter_add(&tctx->inflight, refill);
1735 refcount_add(refill, &current->usage);
1736 tctx->cached_refs += refill;
1737}
1738
1739static inline void io_get_task_refs(int nr)
1740{
1741 struct io_uring_task *tctx = current->io_uring;
1742
1743 tctx->cached_refs -= nr;
1744 if (unlikely(tctx->cached_refs < 0))
1745 io_task_refs_refill(tctx);
1746}
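/*
 * Worked example (assuming IO_TCTX_REFS_CACHE_NR == 1 << 10): with
 * cached_refs == 1, taking 4 refs leaves cached_refs == -3; the refill
 * then adds 3 + 1024 to both the inflight counter and the cache, leaving
 * cached_refs == 1024 again.
 */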
1747
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00001748static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
1749{
1750 struct io_uring_task *tctx = task->io_uring;
1751 unsigned int refs = tctx->cached_refs;
1752
1753 if (refs) {
1754 tctx->cached_refs = 0;
1755 percpu_counter_sub(&tctx->inflight, refs);
1756 put_task_struct_many(task, refs);
1757 }
1758}
1759
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001760static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001761 s32 res, u32 cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001762{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001763 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001764
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001765 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1766 if (!ocqe) {
1767 /*
1768 * If we're in ring overflow flush mode, or in task cancel mode,
1769 * or cannot allocate an overflow entry, then we need to drop it
1770 * on the floor.
1771 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001772 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001773 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001774 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001775 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001776 set_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001777 WRITE_ONCE(ctx->rings->sq_flags,
1778 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1779
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001780 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001781 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001782 ocqe->cqe.res = res;
1783 ocqe->cqe.flags = cflags;
1784 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1785 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001786}
1787
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001788static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
1789 s32 res, u32 cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001790{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001791 struct io_uring_cqe *cqe;
1792
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001793 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001794
1795 /*
1796 * If we can't get a cq entry, userspace overflowed the
1797 * submission (by quite a lot). Increment the overflow count in
1798 * the ring.
1799 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001800 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001801 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001802 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001803 WRITE_ONCE(cqe->res, res);
1804 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001805 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001806 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001807 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001808}
1809
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001810static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001811{
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001812 __io_fill_cqe(req->ctx, req->user_data, res, cflags);
1813}
1814
1815static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
1816 s32 res, u32 cflags)
1817{
1818 ctx->cq_extra++;
1819 return __io_fill_cqe(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001820}
1821
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001822static void io_req_complete_post(struct io_kiocb *req, s32 res,
1823 u32 cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001824{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001825 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001826
Jens Axboe79ebeae2021-08-10 15:18:27 -06001827 spin_lock(&ctx->completion_lock);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001828 __io_fill_cqe(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001829 /*
1830 * If we're the last reference to this request, add to our locked
1831 * free_list cache.
1832 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001833 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001834 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov0756a862021-08-15 10:40:25 +01001835 if (req->flags & IO_DISARM_MASK)
Pavel Begunkov7a612352021-03-09 00:37:59 +00001836 io_disarm_next(req);
1837 if (req->link) {
1838 io_req_task_queue(req->link);
1839 req->link = NULL;
1840 }
1841 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001842 io_dismantle_req(req);
1843 io_put_task(req->task, 1);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001844 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001845 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001846 } else {
1847 if (!percpu_ref_tryget(&ctx->refs))
1848 req = NULL;
1849 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001850 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001851 spin_unlock(&ctx->completion_lock);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001852
Pavel Begunkov180f8292021-03-14 20:57:09 +00001853 if (req) {
1854 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001855 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001856 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001857}
1858
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001859static inline bool io_req_needs_clean(struct io_kiocb *req)
1860{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001861 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001862}
1863
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001864static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
1865 u32 cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001866{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001867 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001868 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001869 req->result = res;
1870 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001871 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001872}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001873
Pavel Begunkov889fca72021-02-10 00:03:09 +00001874static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001875 s32 res, u32 cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001876{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001877 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1878 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001879 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001880 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001881}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001882
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001883static inline void io_req_complete(struct io_kiocb *req, s32 res)
Jens Axboee1e16092020-06-22 09:17:17 -06001884{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001885 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001886}
1887
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001888static void io_req_complete_failed(struct io_kiocb *req, s32 res)
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001889{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001890 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001891 io_req_complete_post(req, res, 0);
1892}
1893
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01001894static void io_req_complete_fail_submit(struct io_kiocb *req)
1895{
1896 /*
1897	 * We don't submit; fail them all. For that, replace hardlinks with
1898	 * normal links. An extra REQ_F_LINK is tolerated.
1899 */
1900 req->flags &= ~REQ_F_HARDLINK;
1901 req->flags |= REQ_F_LINK;
1902 io_req_complete_failed(req, req->result);
1903}
1904
Pavel Begunkov864ea922021-08-09 13:04:08 +01001905/*
1906 * Don't initialise the fields below on every allocation, but do that in
1907 * advance and keep them valid across allocations.
1908 */
1909static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1910{
1911 req->ctx = ctx;
1912 req->link = NULL;
1913 req->async_data = NULL;
1914 /* not necessary, but safer to zero */
1915 req->result = 0;
1916}
1917
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001918static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001919 struct io_submit_state *state)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001920{
Jens Axboe79ebeae2021-08-10 15:18:27 -06001921 spin_lock(&ctx->completion_lock);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001922 list_splice_init(&ctx->locked_free_list, &state->free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001923 ctx->locked_free_nr = 0;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001924 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001925}
1926
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001927/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001928static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001929{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001930 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001931 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001932
Jens Axboec7dae4b2021-02-09 19:53:37 -07001933 /*
1934 * If we have more than a batch's worth of requests in our IRQ side
1935 * locked cache, grab the lock and move them over to our submission
1936 * side cache.
1937 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001938 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001939 io_flush_cached_locked_reqs(ctx, state);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001940
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001941 nr = state->free_reqs;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001942 while (!list_empty(&state->free_list)) {
1943 struct io_kiocb *req = list_first_entry(&state->free_list,
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001944 struct io_kiocb, inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001945
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001946 list_del(&req->inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001947 state->reqs[nr++] = req;
1948 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001949 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001950 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001951
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001952 state->free_reqs = nr;
1953 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001954}
1955
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001956/*
1957 * A request might get retired back into the request caches even before opcode
1958 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1959 * Because of that, io_alloc_req() should be called only under ->uring_lock
1960 * and with extra caution not to get a request that is still being worked on.
1961 */
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001962static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001963 __must_hold(&ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001964{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001965 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkov864ea922021-08-09 13:04:08 +01001966 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1967 int ret, i;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001968
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001969 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001970
Pavel Begunkov864ea922021-08-09 13:04:08 +01001971 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1972 goto got_req;
Jens Axboe2579f912019-01-09 09:10:43 -07001973
Pavel Begunkov864ea922021-08-09 13:04:08 +01001974 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1975 state->reqs);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001976
Pavel Begunkov864ea922021-08-09 13:04:08 +01001977 /*
1978 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1979 * retry single alloc to be on the safe side.
1980 */
1981 if (unlikely(ret <= 0)) {
1982 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1983 if (!state->reqs[0])
1984 return NULL;
1985 ret = 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001986 }
Pavel Begunkov864ea922021-08-09 13:04:08 +01001987
1988 for (i = 0; i < ret; i++)
1989 io_preinit_req(state->reqs[i], ctx);
1990 state->free_reqs = ret;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001991got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001992 state->free_reqs--;
1993 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001994}
1995
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001996static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001997{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001998 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001999 fput(file);
2000}
2001
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002002static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002003{
Pavel Begunkov094bae42021-03-19 17:22:42 +00002004 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03002005
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01002006 if (io_req_needs_clean(req))
2007 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002008 if (!(flags & REQ_F_FIXED_FILE))
2009 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00002010 if (req->fixed_rsrc_refs)
2011 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01002012 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00002013 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01002014 req->async_data = NULL;
2015 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03002016}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03002017
Pavel Begunkov216578e2020-10-13 09:44:00 +01002018static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03002019{
Jens Axboe51a4cc12020-08-10 10:55:56 -06002020 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002021
Pavel Begunkov216578e2020-10-13 09:44:00 +01002022 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00002023 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03002024
Jens Axboe79ebeae2021-08-10 15:18:27 -06002025 spin_lock(&ctx->completion_lock);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01002026 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01002027 ctx->locked_free_nr++;
Jens Axboe79ebeae2021-08-10 15:18:27 -06002028 spin_unlock(&ctx->completion_lock);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01002029
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002030 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06002031}
2032
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002033static inline void io_remove_next_linked(struct io_kiocb *req)
2034{
2035 struct io_kiocb *nxt = req->link;
2036
2037 req->link = nxt->link;
2038 nxt->link = NULL;
2039}
2040
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002041static bool io_kill_linked_timeout(struct io_kiocb *req)
2042 __must_hold(&req->ctx->completion_lock)
Jens Axboe89b263f2021-08-10 15:14:18 -06002043 __must_hold(&req->ctx->timeout_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002044{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002045 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002046
Pavel Begunkovb97e7362021-08-15 10:40:23 +01002047 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002048 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002049
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002050 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00002051 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01002052 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovef9dd632021-08-28 19:54:38 -06002053 list_del(&link->timeout.list);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002054 io_fill_cqe_req(link, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002055 io_put_req_deferred(link);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002056 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002057 }
2058 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002059 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002060}
2061
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002062static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002063 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002064{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002065 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06002066
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002067 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002068 while (link) {
Hao Xua8295b92021-08-27 17:46:09 +08002069 long res = -ECANCELED;
2070
2071 if (link->flags & REQ_F_FAIL)
2072 res = link->result;
2073
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002074 nxt = link->link;
2075 link->link = NULL;
2076
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002077 trace_io_uring_fail_link(req, link);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002078 io_fill_cqe_req(link, res, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002079 io_put_req_deferred(link);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002080 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06002081 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002082}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002083
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002084static bool io_disarm_next(struct io_kiocb *req)
2085 __must_hold(&req->ctx->completion_lock)
2086{
2087 bool posted = false;
2088
Pavel Begunkov0756a862021-08-15 10:40:25 +01002089 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2090 struct io_kiocb *link = req->link;
2091
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01002092 req->flags &= ~REQ_F_ARM_LTIMEOUT;
Pavel Begunkov0756a862021-08-15 10:40:25 +01002093 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2094 io_remove_next_linked(req);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002095 io_fill_cqe_req(link, -ECANCELED, 0);
Pavel Begunkov0756a862021-08-15 10:40:25 +01002096 io_put_req_deferred(link);
2097 posted = true;
2098 }
2099 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
Jens Axboe89b263f2021-08-10 15:14:18 -06002100 struct io_ring_ctx *ctx = req->ctx;
2101
2102 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002103 posted = io_kill_linked_timeout(req);
Jens Axboe89b263f2021-08-10 15:14:18 -06002104 spin_unlock_irq(&ctx->timeout_lock);
2105 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002106 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01002107 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002108 posted |= (req->link != NULL);
2109 io_fail_links(req);
2110 }
2111 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06002112}
2113
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002114static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002115{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002116 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07002117
Jens Axboe9e645e112019-05-10 16:07:28 -06002118 /*
2119 * If LINK is set, we have dependent requests in this chain. If we
2120 * didn't fail this request, queue the first one up, moving any other
2121 * dependencies to the next request. In case of failure, fail the rest
2122 * of the chain.
2123 */
Pavel Begunkov0756a862021-08-15 10:40:25 +01002124 if (req->flags & IO_DISARM_MASK) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002125 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002126 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002127
Jens Axboe79ebeae2021-08-10 15:18:27 -06002128 spin_lock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002129 posted = io_disarm_next(req);
2130 if (posted)
2131 io_commit_cqring(req->ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002132 spin_unlock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002133 if (posted)
2134 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002135 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002136 nxt = req->link;
2137 req->link = NULL;
2138 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002139}
Jens Axboe2665abf2019-11-05 12:40:47 -07002140
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002141static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002142{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00002143 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002144 return NULL;
2145 return __io_req_find_next(req);
2146}
2147
Pavel Begunkovf237c302021-08-18 12:42:46 +01002148static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
Pavel Begunkov2c323952021-02-28 22:04:53 +00002149{
2150 if (!ctx)
2151 return;
Pavel Begunkovf237c302021-08-18 12:42:46 +01002152 if (*locked) {
Hao Xu99c8bc52021-08-21 06:19:54 +08002153 if (ctx->submit_state.compl_nr)
2154 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00002155 mutex_unlock(&ctx->uring_lock);
Pavel Begunkovf237c302021-08-18 12:42:46 +01002156 *locked = false;
Pavel Begunkov2c323952021-02-28 22:04:53 +00002157 }
2158 percpu_ref_put(&ctx->refs);
2159}
2160
Jens Axboe7cbf1722021-02-10 00:03:20 +00002161static void tctx_task_work(struct callback_head *cb)
2162{
Pavel Begunkovf237c302021-08-18 12:42:46 +01002163 bool locked = false;
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002164 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002165 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2166 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002167
Pavel Begunkov16f72072021-06-17 18:14:09 +01002168 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002169 struct io_wq_work_node *node;
2170
Pavel Begunkov8d4ad412021-09-02 00:38:23 +01002171 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
2172 io_submit_flush_completions(ctx);
2173
Pavel Begunkov3f184072021-06-17 18:14:06 +01002174 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01002175 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002176 INIT_WQ_LIST(&tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002177 if (!node)
2178 tctx->task_running = false;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002179 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002180 if (!node)
2181 break;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002182
Pavel Begunkov6294f362021-08-10 17:53:55 +01002183 do {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002184 struct io_wq_work_node *next = node->next;
2185 struct io_kiocb *req = container_of(node, struct io_kiocb,
2186 io_task_work.node);
2187
2188 if (req->ctx != ctx) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002189 ctx_flush_and_put(ctx, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002190 ctx = req->ctx;
Pavel Begunkov126180b2021-08-18 12:42:47 +01002191			/* if not contended, grab the lock to improve batching */
2192 locked = mutex_trylock(&ctx->uring_lock);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002193 percpu_ref_get(&ctx->refs);
2194 }
Pavel Begunkovf237c302021-08-18 12:42:46 +01002195 req->io_task_work.func(req, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002196 node = next;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002197 } while (node);
2198
Jens Axboe7cbf1722021-02-10 00:03:20 +00002199 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01002200 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002201
Pavel Begunkovf237c302021-08-18 12:42:46 +01002202 ctx_flush_and_put(ctx, &locked);
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00002203
2204 /* relaxed read is enough as only the task itself sets ->in_idle */
2205 if (unlikely(atomic_read(&tctx->in_idle)))
2206 io_uring_drop_tctx_refs(current);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002207}
2208
Pavel Begunkove09ee512021-07-01 13:26:05 +01002209static void io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00002210{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002211 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002212 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002213 enum task_work_notify_mode notify;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002214 struct io_wq_work_node *node;
Jens Axboe0b81e802021-02-16 10:33:53 -07002215 unsigned long flags;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002216 bool running;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002217
2218 WARN_ON_ONCE(!tctx);
2219
Jens Axboe0b81e802021-02-16 10:33:53 -07002220 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002221 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002222 running = tctx->task_running;
2223 if (!running)
2224 tctx->task_running = true;
Jens Axboe0b81e802021-02-16 10:33:53 -07002225 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002226
2227 /* task_work already pending, we're done */
Pavel Begunkov6294f362021-08-10 17:53:55 +01002228 if (running)
Pavel Begunkove09ee512021-07-01 13:26:05 +01002229 return;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002230
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002231 /*
2232 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2233 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2234 * processing task_work. There's no reliable way to tell if TWA_RESUME
2235 * will do the job.
2236 */
2237 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002238 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2239 wake_up_process(tsk);
Pavel Begunkove09ee512021-07-01 13:26:05 +01002240 return;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002241 }
Pavel Begunkov2215bed2021-08-09 13:04:06 +01002242
Pavel Begunkove09ee512021-07-01 13:26:05 +01002243 spin_lock_irqsave(&tctx->task_lock, flags);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002244 tctx->task_running = false;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002245 node = tctx->task_list.first;
2246 INIT_WQ_LIST(&tctx->task_list);
2247 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002248
Pavel Begunkove09ee512021-07-01 13:26:05 +01002249 while (node) {
2250 req = container_of(node, struct io_kiocb, io_task_work.node);
2251 node = node->next;
2252 if (llist_add(&req->io_task_work.fallback_node,
2253 &req->ctx->fallback_llist))
2254 schedule_delayed_work(&req->ctx->fallback_work, 1);
2255 }
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002256}
2257
Pavel Begunkovf237c302021-08-18 12:42:46 +01002258static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
Jens Axboec40f6372020-06-25 15:39:59 -06002259{
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002260 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002261
Pavel Begunkovb18a1a42021-08-25 20:51:39 +01002262 /* not needed for normal modes, but SQPOLL depends on it */
Pavel Begunkovf237c302021-08-18 12:42:46 +01002263 io_tw_lock(ctx, locked);
Pavel Begunkov25935532021-03-19 17:22:40 +00002264 io_req_complete_failed(req, req->result);
Jens Axboec40f6372020-06-25 15:39:59 -06002265}
2266
Pavel Begunkovf237c302021-08-18 12:42:46 +01002267static void io_req_task_submit(struct io_kiocb *req, bool *locked)
Jens Axboec40f6372020-06-25 15:39:59 -06002268{
2269 struct io_ring_ctx *ctx = req->ctx;
2270
Pavel Begunkovf237c302021-08-18 12:42:46 +01002271 io_tw_lock(ctx, locked);
Jens Axboe316319e2021-08-19 09:41:42 -06002272 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkovaf066f32021-08-09 13:04:19 +01002273 if (likely(!(req->task->flags & PF_EXITING)))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002274 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002275 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002276 io_req_complete_failed(req, -EFAULT);
Jens Axboe9e645e112019-05-10 16:07:28 -06002277}
2278
Pavel Begunkova3df76982021-02-18 22:32:52 +00002279static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2280{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002281 req->result = ret;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002282 req->io_task_work.func = io_req_task_cancel;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002283 io_req_task_work_add(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00002284}
2285
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002286static void io_req_task_queue(struct io_kiocb *req)
2287{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002288 req->io_task_work.func = io_req_task_submit;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002289 io_req_task_work_add(req);
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002290}
2291
Jens Axboe773af692021-07-27 10:25:55 -06002292static void io_req_task_queue_reissue(struct io_kiocb *req)
2293{
2294 req->io_task_work.func = io_queue_async_work;
2295 io_req_task_work_add(req);
2296}
2297
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002298static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002299{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002300 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002301
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002302 if (nxt)
2303 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002304}
2305
Jens Axboe9e645e112019-05-10 16:07:28 -06002306static void io_free_req(struct io_kiocb *req)
2307{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002308 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002309 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002310}
2311
Pavel Begunkovf237c302021-08-18 12:42:46 +01002312static void io_free_req_work(struct io_kiocb *req, bool *locked)
2313{
2314 io_free_req(req);
2315}
2316
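/*
 * Batches reference drops while freeing completed requests, so task and
 * ctx references are put once per batch rather than once per request.
 */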
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002317struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002318 struct task_struct *task;
2319 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002320 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002321};
2322
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002323static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002324{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002325 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002326 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002327 rb->task = NULL;
2328}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002329
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002330static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2331 struct req_batch *rb)
2332{
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002333 if (rb->ctx_refs)
2334 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkove98e49b2021-08-18 17:01:43 +01002335 if (rb->task)
Pavel Begunkove9dbe222021-08-09 13:04:20 +01002336 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002337}
2338
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002339static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2340 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002341{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002342 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002343 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002344
Jens Axboee3bc8e92020-09-24 08:45:57 -06002345 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002346 if (rb->task)
2347 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002348 rb->task = req->task;
2349 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002350 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002351 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002352 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002353
Pavel Begunkovbd759042021-02-12 03:23:50 +00002354 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002355 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002356 else
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002357 list_add(&req->inflight_entry, &state->free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002358}
2359
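/*
 * Flush the completions batched in ctx->submit_state: post all CQEs
 * under a single completion_lock section, then drop the submission
 * references, freeing requests that reach zero refcount via the batch.
 */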
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002360static void io_submit_flush_completions(struct io_ring_ctx *ctx)
Jens Axboea141dd82021-08-12 12:48:34 -06002361 __must_hold(&ctx->uring_lock)
Pavel Begunkov905c1722021-02-10 00:03:14 +00002362{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002363 struct io_submit_state *state = &ctx->submit_state;
2364 int i, nr = state->compl_nr;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002365 struct req_batch rb;
2366
Jens Axboe79ebeae2021-08-10 15:18:27 -06002367 spin_lock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002368 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002369 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002370
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002371 __io_fill_cqe(ctx, req->user_data, req->result,
2372 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002373 }
2374 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002375 spin_unlock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002376 io_cqring_ev_posted(ctx);
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002377
2378 io_init_req_batch(&rb);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002379 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002380 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov905c1722021-02-10 00:03:14 +00002381
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002382 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002383 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002384 }
2385
2386 io_req_free_batch_finish(ctx, &rb);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002387 state->compl_nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002388}
2389
Jens Axboeba816ad2019-09-28 11:36:45 -06002390/*
2391 * Drop a reference to the request, and return the next request in the
2392 * chain (if there is one) if this was the last reference.
2393 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002394static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002395{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002396 struct io_kiocb *nxt = NULL;
2397
Jens Axboede9b4cc2021-02-24 13:28:27 -07002398 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002399 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002400 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002401 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002402 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002403}
2404
Pavel Begunkov0d850352021-03-19 17:22:37 +00002405static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002406{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002407 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002408 io_free_req(req);
2409}
2410
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002411static inline void io_put_req_deferred(struct io_kiocb *req)
Pavel Begunkov216578e2020-10-13 09:44:00 +01002412{
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002413 if (req_ref_put_and_test(req)) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002414 req->io_task_work.func = io_free_req_work;
Pavel Begunkov543af3a2021-08-09 13:04:15 +01002415 io_req_task_work_add(req);
2416 }
Pavel Begunkov216578e2020-10-13 09:44:00 +01002417}
2418
Pavel Begunkov6c503152021-01-04 20:36:36 +00002419static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002420{
2421 /* See comment at the top of this file */
2422 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002423 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002424}
2425
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002426static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2427{
2428 struct io_rings *rings = ctx->rings;
2429
2430 /* make sure SQ entry isn't read before tail */
2431 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2432}
2433
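/*
 * Release a selected provided buffer and return the CQE flags that
 * report its buffer ID (IORING_CQE_F_BUFFER) to the application.
 */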
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002434static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002435{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002436 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002437
Jens Axboebcda7ba2020-02-23 16:42:51 -07002438 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2439 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002440 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002441 kfree(kbuf);
2442 return cflags;
2443}
2444
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002445static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2446{
2447 struct io_buffer *kbuf;
2448
Pavel Begunkovae421d92021-08-17 20:28:08 +01002449 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
2450 return 0;
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002451 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2452 return io_put_kbuf(req, kbuf);
2453}
2454
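/*
 * Run any task_work pending for the current task. Returns true if there
 * was work to run.
 */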
Jens Axboe4c6e2772020-07-01 11:29:10 -06002455static inline bool io_run_task_work(void)
2456{
Nadav Amitef98eb02021-08-07 17:13:41 -07002457 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
Jens Axboe4c6e2772020-07-01 11:29:10 -06002458 __set_current_state(TASK_RUNNING);
Nadav Amitef98eb02021-08-07 17:13:41 -07002459 tracehook_notify_signal();
Jens Axboe4c6e2772020-07-01 11:29:10 -06002460 return true;
2461 }
2462
2463 return false;
2464}
2465
Jens Axboedef596e2019-01-09 08:59:42 -07002466/*
2467 * Find and free completed poll iocbs
2468 */
2469static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002470 struct list_head *done)
Jens Axboedef596e2019-01-09 08:59:42 -07002471{
Jens Axboe8237e042019-12-28 10:48:22 -07002472 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002473 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002474
2475 /* order with ->result store in io_complete_rw_iopoll() */
2476 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002477
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002478 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002479 while (!list_empty(done)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002480 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002481 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002482
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002483 io_fill_cqe_req(req, req->result, io_put_rw_kbuf(req));
Jens Axboedef596e2019-01-09 08:59:42 -07002484 (*nr_events)++;
2485
Jens Axboede9b4cc2021-02-24 13:28:27 -07002486 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002487 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002488 }
Jens Axboedef596e2019-01-09 08:59:42 -07002489
Jens Axboe09bb8392019-03-13 12:39:28 -06002490 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002491 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002492 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002493}
2494
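/*
 * One pass of IOPOLL reaping: poll each request on ->iopoll_list,
 * moving completed ones to a local done list and completing them.
 * Spinning in ->iopoll() is permitted only if all requests target a
 * single queue and fewer than 'min' events have been found so far.
 */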
Jens Axboedef596e2019-01-09 08:59:42 -07002495static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002496 long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002497{
2498 struct io_kiocb *req, *tmp;
2499 LIST_HEAD(done);
2500 bool spin;
Jens Axboedef596e2019-01-09 08:59:42 -07002501
2502 /*
2503 * Only spin for completions if we don't have multiple devices hanging
 2504	 * off our complete list, and we've found fewer events than requested.
2505 */
Hao Xu915b3dd2021-06-28 05:37:30 +08002506 spin = !ctx->poll_multi_queue && *nr_events < min;
Jens Axboedef596e2019-01-09 08:59:42 -07002507
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002508 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002509 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkova2416e12021-08-09 13:04:09 +01002510 int ret;
Jens Axboedef596e2019-01-09 08:59:42 -07002511
2512 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002513		 * Move completed entries to our local done list. If we find
 2514		 * a request that still requires polling, break out and
 2515		 * complete the done entries first, if we have any.
Jens Axboedef596e2019-01-09 08:59:42 -07002516 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002517 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002518 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002519 continue;
2520 }
2521 if (!list_empty(&done))
2522 break;
2523
2524 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
Pavel Begunkova2416e12021-08-09 13:04:09 +01002525 if (unlikely(ret < 0))
2526 return ret;
2527 else if (ret)
2528 spin = false;
Jens Axboedef596e2019-01-09 08:59:42 -07002529
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002530 /* iopoll may have completed current req */
2531 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002532 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002533 }
2534
2535 if (!list_empty(&done))
Pavel Begunkova8576af2021-08-15 10:40:21 +01002536 io_iopoll_complete(ctx, nr_events, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002537
Pavel Begunkova2416e12021-08-09 13:04:09 +01002538 return 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002539}
2540
2541/*
Jens Axboedef596e2019-01-09 08:59:42 -07002542 * We can't just wait for polled events to come to us, we have to actively
2543 * find and complete them.
2544 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002545static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002546{
2547 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2548 return;
2549
2550 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002551 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002552 unsigned int nr_events = 0;
2553
Pavel Begunkova8576af2021-08-15 10:40:21 +01002554 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002555
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002556		/* let it sleep and repeat later if we can't complete a request */
2557 if (nr_events == 0)
2558 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002559 /*
 2560		 * Ensure we allow local-to-the-cpu processing to take place;
 2561		 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002562		 * Also let task_work, etc. make progress by releasing the mutex.
Jens Axboe08f54392019-08-21 22:19:11 -06002563 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002564 if (need_resched()) {
2565 mutex_unlock(&ctx->uring_lock);
2566 cond_resched();
2567 mutex_lock(&ctx->uring_lock);
2568 }
Jens Axboedef596e2019-01-09 08:59:42 -07002569 }
2570 mutex_unlock(&ctx->uring_lock);
2571}
2572
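/*
 * Reap IOPOLL completions on behalf of the submitter until at least
 * 'min' events are found, an error occurs, or a reschedule is needed.
 * If CQEs are already pending, returns without polling. Runs under
 * uring_lock, dropping it while the poll list is empty so that punted
 * submissions and task_work can make progress.
 */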
Pavel Begunkov7668b922020-07-07 16:36:21 +03002573static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002574{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002575 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002576 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002577
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002578 /*
2579 * We disallow the app entering submit/complete with polling, but we
2580 * still need to lock the ring to prevent racing with polled issue
2581 * that got punted to a workqueue.
2582 */
2583 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002584 /*
2585 * Don't enter poll loop if we already have events pending.
2586 * If we do, we can potentially be spinning for commands that
2587 * already triggered a CQE (eg in error).
2588 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01002589 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002590 __io_cqring_overflow_flush(ctx, false);
2591 if (io_cqring_events(ctx))
2592 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002593 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002594 /*
2595 * If a submit got punted to a workqueue, we can have the
2596 * application entering polling for a command before it gets
2597 * issued. That app will hold the uring_lock for the duration
2598 * of the poll right here, so we need to take a breather every
2599 * now and then to ensure that the issue has a chance to add
2600 * the poll to the issued list. Otherwise we can spin here
2601 * forever, while the workqueue is stuck trying to acquire the
2602 * very same mutex.
2603 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002604 if (list_empty(&ctx->iopoll_list)) {
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002605 u32 tail = ctx->cached_cq_tail;
2606
Jens Axboe500f9fb2019-08-19 12:15:59 -06002607 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002608 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002609 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002610
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002611 /* some requests don't go through iopoll_list */
2612 if (tail != ctx->cached_cq_tail ||
2613 list_empty(&ctx->iopoll_list))
Pavel Begunkove9979b32021-04-13 02:58:45 +01002614 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002615 }
Pavel Begunkova8576af2021-08-15 10:40:21 +01002616 ret = io_do_iopoll(ctx, &nr_events, min);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002617 } while (!ret && nr_events < min && !need_resched());
2618out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002619 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002620 return ret;
2621}
2622
Jens Axboe491381ce2019-10-17 09:20:46 -06002623static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002624{
Jens Axboe491381ce2019-10-17 09:20:46 -06002625 /*
 2626	 * Tell lockdep we inherited freeze protection from the
 2627	 * submission thread.
2628 */
2629 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002630 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002631
Pavel Begunkov1c986792021-03-22 01:58:31 +00002632 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2633 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002634 }
2635}
2636
Jens Axboeb63534c2020-06-04 11:28:00 -06002637#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002638static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002639{
Pavel Begunkovab454432021-03-22 01:58:33 +00002640 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002641
Pavel Begunkovab454432021-03-22 01:58:33 +00002642 if (!rw)
2643 return !io_req_prep_async(req);
Jens Axboecd658692021-09-10 11:19:14 -06002644 iov_iter_restore(&rw->iter, &rw->iter_state);
Pavel Begunkovab454432021-03-22 01:58:33 +00002645 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002646}
Jens Axboeb63534c2020-06-04 11:28:00 -06002647
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002648static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002649{
Jens Axboe355afae2020-09-02 09:30:31 -06002650 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002651 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002652
Jens Axboe355afae2020-09-02 09:30:31 -06002653 if (!S_ISBLK(mode) && !S_ISREG(mode))
2654 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002655 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2656 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002657 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002658 /*
2659 * If ref is dying, we might be running poll reap from the exit work.
2660 * Don't attempt to reissue from that path, just let it fail with
2661 * -EAGAIN.
2662 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002663 if (percpu_ref_is_dying(&ctx->refs))
2664 return false;
Jens Axboeef046882021-07-27 10:50:31 -06002665 /*
 2666	 * Play it safe and assume it's not safe to re-import and reissue if
 2667	 * we're not in the original thread group (or not in task context).
2668 */
2669 if (!same_thread_group(req->task, current) || !in_task())
2670 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002671 return true;
2672}
Jens Axboee82ad482021-04-02 19:45:34 -06002673#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002674static bool io_resubmit_prep(struct io_kiocb *req)
2675{
2676 return false;
2677}
Jens Axboee82ad482021-04-02 19:45:34 -06002678static bool io_rw_should_reissue(struct io_kiocb *req)
2679{
2680 return false;
2681}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002682#endif
2683
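/*
 * Common read/write completion handling: drop write-freeze protection
 * and send fsnotify events, then decide whether an unexpected result
 * should be retried (REQ_F_REISSUE). Returns true if the request will
 * be reissued and must not be completed yet.
 */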
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002684static bool __io_complete_rw_common(struct io_kiocb *req, long res)
Jens Axboea1d7c392020-06-22 11:09:46 -06002685{
Jens Axboedf1ec532022-03-20 13:08:38 -06002686 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002687 kiocb_end_write(req);
Jens Axboedf1ec532022-03-20 13:08:38 -06002688 fsnotify_modify(req->file);
2689 } else {
2690 fsnotify_access(req->file);
2691 }
Pavel Begunkov9532b992021-03-22 01:58:34 +00002692 if (res != req->result) {
2693 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2694 io_rw_should_reissue(req)) {
2695 req->flags |= REQ_F_REISSUE;
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002696 return true;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002697 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002698 req_set_fail(req);
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002699 req->result = res;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002700 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002701 return false;
2702}
2703
Pavel Begunkovf237c302021-08-18 12:42:46 +01002704static void io_req_task_complete(struct io_kiocb *req, bool *locked)
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002705{
Pavel Begunkov126180b2021-08-18 12:42:47 +01002706 unsigned int cflags = io_put_rw_kbuf(req);
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01002707 int res = req->result;
Pavel Begunkov126180b2021-08-18 12:42:47 +01002708
2709 if (*locked) {
2710 struct io_ring_ctx *ctx = req->ctx;
2711 struct io_submit_state *state = &ctx->submit_state;
2712
2713 io_req_complete_state(req, res, cflags);
2714 state->compl_reqs[state->compl_nr++] = req;
2715 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
2716 io_submit_flush_completions(ctx);
2717 } else {
2718 io_req_complete_post(req, res, cflags);
2719 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002720}
2721
2722static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2723 unsigned int issue_flags)
2724{
2725 if (__io_complete_rw_common(req, res))
2726 return;
Pavel Begunkov63637852021-09-02 00:38:22 +01002727 __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
Jens Axboeba816ad2019-09-28 11:36:45 -06002728}
2729
2730static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2731{
Jens Axboe9adbd452019-12-20 08:45:55 -07002732 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002733
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002734 if (__io_complete_rw_common(req, res))
2735 return;
2736 req->result = res;
2737 req->io_task_work.func = io_req_task_complete;
2738 io_req_task_work_add(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002739}
2740
Jens Axboedef596e2019-01-09 08:59:42 -07002741static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2742{
Jens Axboe9adbd452019-12-20 08:45:55 -07002743 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002744
Jens Axboe491381ce2019-10-17 09:20:46 -06002745 if (kiocb->ki_flags & IOCB_WRITE)
2746 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002747 if (unlikely(res != req->result)) {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002748 if (res == -EAGAIN && io_rw_should_reissue(req)) {
2749 req->flags |= REQ_F_REISSUE;
2750 return;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002751 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002752 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002753
2754 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002755 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002756 smp_wmb();
2757 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002758}
2759
2760/*
2761 * After the iocb has been issued, it's safe to be found on the poll list.
2762 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002763 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002764 * accessing the kiocb cookie.
2765 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002766static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002767{
2768 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002769 const bool in_async = io_wq_current_is_worker();
2770
2771 /* workqueue context doesn't hold uring_lock, grab it now */
2772 if (unlikely(in_async))
2773 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002774
2775 /*
2776 * Track whether we have multiple files in our lists. This will impact
 2777	 * how we do polling later on: we won't spin if we're on potentially
2778 * different devices.
2779 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002780 if (list_empty(&ctx->iopoll_list)) {
Hao Xu915b3dd2021-06-28 05:37:30 +08002781 ctx->poll_multi_queue = false;
2782 } else if (!ctx->poll_multi_queue) {
Jens Axboedef596e2019-01-09 08:59:42 -07002783 struct io_kiocb *list_req;
Hao Xu915b3dd2021-06-28 05:37:30 +08002784 unsigned int queue_num0, queue_num1;
Jens Axboedef596e2019-01-09 08:59:42 -07002785
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002786 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002787 inflight_entry);
Hao Xu915b3dd2021-06-28 05:37:30 +08002788
2789 if (list_req->file != req->file) {
2790 ctx->poll_multi_queue = true;
2791 } else {
2792 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2793 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2794 if (queue_num0 != queue_num1)
2795 ctx->poll_multi_queue = true;
2796 }
Jens Axboedef596e2019-01-09 08:59:42 -07002797 }
2798
2799 /*
2800 * For fast devices, IO may have already completed. If it has, add
2801 * it to the front so we find it first.
2802 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002803 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002804 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002805 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002806 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002807
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002808 if (unlikely(in_async)) {
2809 /*
 2810		 * If IORING_SETUP_SQPOLL is enabled, sqes are handled either
 2811		 * in sq thread task context or in io worker task context. If
 2812		 * the current task context is the sq thread, we don't need to
 2813		 * check whether we should wake up the sq thread.
2814 */
2815 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2816 wq_has_sleeper(&ctx->sq_data->wait))
2817 wake_up(&ctx->sq_data->wait);
2818
2819 mutex_unlock(&ctx->uring_lock);
2820 }
Jens Axboedef596e2019-01-09 08:59:42 -07002821}
2822
Jens Axboe4503b762020-06-01 10:00:27 -06002823static bool io_bdev_nowait(struct block_device *bdev)
2824{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002825 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002826}
2827
Jens Axboe2b188cc2019-01-07 10:46:33 -07002828/*
2829 * If we tracked the file through the SCM inflight mechanism, we could support
2830 * any file. For now, just ensure that anything potentially problematic is done
2831 * inline.
2832 */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002833static bool __io_file_supports_nowait(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002834{
2835 umode_t mode = file_inode(file)->i_mode;
2836
Jens Axboe4503b762020-06-01 10:00:27 -06002837 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002838 if (IS_ENABLED(CONFIG_BLOCK) &&
2839 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002840 return true;
2841 return false;
2842 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002843 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002844 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002845 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002846 if (IS_ENABLED(CONFIG_BLOCK) &&
2847 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002848 file->f_op != &io_uring_fops)
2849 return true;
2850 return false;
2851 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002852
Jens Axboec5b85622020-06-09 19:23:05 -06002853 /* any ->read/write should understand O_NONBLOCK */
2854 if (file->f_flags & O_NONBLOCK)
2855 return true;
2856
Jens Axboeaf197f52020-04-28 13:15:06 -06002857 if (!(file->f_mode & FMODE_NOWAIT))
2858 return false;
2859
2860 if (rw == READ)
2861 return file->f_op->read_iter != NULL;
2862
2863 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002864}
2865
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002866static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
Jens Axboe7b29f922021-03-12 08:30:14 -07002867{
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002868 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
Jens Axboe7b29f922021-03-12 08:30:14 -07002869 return true;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002870 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
Jens Axboe7b29f922021-03-12 08:30:14 -07002871 return true;
2872
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002873 return __io_file_supports_nowait(req->file, rw);
Jens Axboe7b29f922021-03-12 08:30:14 -07002874}
2875
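/*
 * Prepare the kiocb for a read or write from the SQE: file position,
 * rw flags, ioprio, and the registered buffer for fixed read/write.
 * IOPOLL rings require O_DIRECT files that implement ->iopoll().
 */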
Jens Axboe5d329e12021-09-14 11:08:37 -06002876static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2877 int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002878{
Jens Axboedef596e2019-01-09 08:59:42 -07002879 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002880 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002881 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002882 unsigned ioprio;
2883 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002884
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01002885 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002886 req->flags |= REQ_F_ISREG;
2887
Jens Axboe2b188cc2019-01-07 10:46:33 -07002888 kiocb->ki_pos = READ_ONCE(sqe->off);
Jens Axboe20fb0dc2021-12-22 20:26:56 -07002889 if (kiocb->ki_pos == -1) {
2890 if (!(file->f_mode & FMODE_STREAM)) {
2891 req->flags |= REQ_F_CUR_POS;
2892 kiocb->ki_pos = file->f_pos;
2893 } else {
2894 kiocb->ki_pos = 0;
2895 }
Jens Axboeba042912019-12-25 16:33:42 -07002896 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002897 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002898 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2899 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2900 if (unlikely(ret))
2901 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002902
Jens Axboe5d329e12021-09-14 11:08:37 -06002903 /*
2904 * If the file is marked O_NONBLOCK, still allow retry for it if it
2905 * supports async. Otherwise it's impossible to use O_NONBLOCK files
 2906	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
2907 */
2908 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
2909 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002910 req->flags |= REQ_F_NOWAIT;
2911
Jens Axboe2b188cc2019-01-07 10:46:33 -07002912 ioprio = READ_ONCE(sqe->ioprio);
2913 if (ioprio) {
2914 ret = ioprio_check_cap(ioprio);
2915 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002916 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002917
2918 kiocb->ki_ioprio = ioprio;
2919 } else
2920 kiocb->ki_ioprio = get_current_ioprio();
2921
Jens Axboedef596e2019-01-09 08:59:42 -07002922 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002923 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2924 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002925 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002926
Jens Axboe394918e2021-03-08 11:40:23 -07002927 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
Jens Axboedef596e2019-01-09 08:59:42 -07002928 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002929 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002930 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002931 if (kiocb->ki_flags & IOCB_HIPRI)
2932 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002933 kiocb->ki_complete = io_complete_rw;
2934 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002935
Pavel Begunkovea512d52022-06-09 08:34:35 +01002936 /* used for fixed read/write too - just read unconditionally */
2937 req->buf_index = READ_ONCE(sqe->buf_index);
2938 req->imu = NULL;
2939
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002940 if (req->opcode == IORING_OP_READ_FIXED ||
2941 req->opcode == IORING_OP_WRITE_FIXED) {
Pavel Begunkovea512d52022-06-09 08:34:35 +01002942 struct io_ring_ctx *ctx = req->ctx;
2943 u16 index;
2944
2945 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
2946 return -EFAULT;
2947 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
2948 req->imu = ctx->user_bufs[index];
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002949 io_req_set_rsrc_node(req);
2950 }
2951
Jens Axboe3529d8c2019-12-19 18:24:38 -07002952 req->rw.addr = READ_ONCE(sqe->addr);
2953 req->rw.len = READ_ONCE(sqe->len);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002954 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002955}
2956
2957static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2958{
2959 switch (ret) {
2960 case -EIOCBQUEUED:
2961 break;
2962 case -ERESTARTSYS:
2963 case -ERESTARTNOINTR:
2964 case -ERESTARTNOHAND:
2965 case -ERESTART_RESTARTBLOCK:
2966 /*
2967 * We can't just restart the syscall, since previously
2968 * submitted sqes may already be in progress. Just fail this
2969 * IO with EINTR.
2970 */
2971 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002972 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002973 default:
2974 kiocb->ki_complete(kiocb, ret, 0);
2975 }
2976}
2977
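/*
 * Finish a read/write attempt: fold in bytes completed by previous
 * retries, update ->f_pos for REQ_F_CUR_POS, then either complete the
 * request or, if REQ_F_REISSUE was set, prepare and requeue it.
 */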
Jens Axboea1d7c392020-06-22 11:09:46 -06002978static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002979 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002980{
Jens Axboeba042912019-12-25 16:33:42 -07002981 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002982 struct io_async_rw *io = req->async_data;
Jens Axboeba042912019-12-25 16:33:42 -07002983
Jens Axboe227c0c92020-08-13 11:51:40 -06002984 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002985 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002986 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002987 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002988 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002989 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002990 }
2991
Jens Axboeba042912019-12-25 16:33:42 -07002992 if (req->flags & REQ_F_CUR_POS)
2993 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002994 if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
Pavel Begunkov889fca72021-02-10 00:03:09 +00002995 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002996 else
2997 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002998
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002999 if (req->flags & REQ_F_REISSUE) {
Pavel Begunkov97284632021-04-08 19:28:03 +01003000 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06003001 if (io_resubmit_prep(req)) {
Jens Axboe773af692021-07-27 10:25:55 -06003002 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00003003 } else {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01003004 unsigned int cflags = io_put_rw_kbuf(req);
3005 struct io_ring_ctx *ctx = req->ctx;
3006
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003007 req_set_fail(req);
Hao Xu14cfbb72021-10-14 22:04:00 +08003008 if (!(issue_flags & IO_URING_F_NONBLOCK)) {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01003009 mutex_lock(&ctx->uring_lock);
3010 __io_req_complete(req, issue_flags, ret, cflags);
3011 mutex_unlock(&ctx->uring_lock);
3012 } else {
3013 __io_req_complete(req, issue_flags, ret, cflags);
3014 }
Pavel Begunkov97284632021-04-08 19:28:03 +01003015 }
3016 }
Jens Axboeba816ad2019-09-28 11:36:45 -06003017}
3018
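/*
 * Set up a bvec iterator over a registered (fixed) buffer for the
 * request's addr/len. The range must fall entirely within the mapped
 * buffer; the offset fixup below avoids a slow iov_iter_advance() walk.
 */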
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003019static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3020 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07003021{
Jens Axboe9adbd452019-12-20 08:45:55 -07003022 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01003023 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07003024 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07003025
Pavel Begunkov75769e32021-04-01 15:43:54 +01003026 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07003027 return -EFAULT;
3028 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01003029 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07003030 return -EFAULT;
3031
3032 /*
3033 * May not be a start of buffer, set size appropriately
3034 * and advance us to the beginning.
3035 */
3036 offset = buf_addr - imu->ubuf;
3037 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06003038
3039 if (offset) {
3040 /*
3041 * Don't use iov_iter_advance() here, as it's really slow for
3042 * using the latter parts of a big fixed buffer - it iterates
3043 * over each segment manually. We can cheat a bit here, because
3044 * we know that:
3045 *
3046 * 1) it's a BVEC iter, we set it up
3047 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3048 * first and last bvec
3049 *
3050 * So just find our index, and adjust the iterator afterwards.
3051 * If the offset is within the first bvec (or the whole first
3052 * bvec, just use iov_iter_advance(). This makes it easier
3053 * since we can just skip the first segment, which may not
3054 * be PAGE_SIZE aligned.
3055 */
3056 const struct bio_vec *bvec = imu->bvec;
3057
3058 if (offset <= bvec->bv_len) {
3059 iov_iter_advance(iter, offset);
3060 } else {
3061 unsigned long seg_skip;
3062
3063 /* skip first vec */
3064 offset -= bvec->bv_len;
3065 seg_skip = 1 + (offset >> PAGE_SHIFT);
3066
3067 iter->bvec = bvec + seg_skip;
3068 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02003069 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003070 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003071 }
3072 }
3073
Pavel Begunkov847595d2021-02-04 13:52:06 +00003074 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07003075}
3076
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003077static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3078{
Pavel Begunkovea512d52022-06-09 08:34:35 +01003079 if (WARN_ON_ONCE(!req->imu))
3080 return -EFAULT;
3081 return __io_import_fixed(req, rw, iter, req->imu);
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003082}
3083
Jens Axboebcda7ba2020-02-23 16:42:51 -07003084static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3085{
3086 if (needs_lock)
3087 mutex_unlock(&ctx->uring_lock);
3088}
3089
3090static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3091{
3092 /*
3093 * "Normal" inline submissions always hold the uring_lock, since we
3094 * grab it from the system call. Same is true for the SQPOLL offload.
3095 * The only exception is when we've detached the request and issue it
 3096	 * from an async worker thread; grab the lock in that case.
3097 */
3098 if (needs_lock)
3099 mutex_lock(&ctx->uring_lock);
3100}
3101
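/*
 * Pick a buffer from provided-buffer group 'bgid', trimming *len to the
 * buffer's size. Grabs uring_lock if it isn't held already; returns the
 * previously chosen buffer if this request already selected one.
 */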
3102static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3103 int bgid, struct io_buffer *kbuf,
3104 bool needs_lock)
3105{
3106 struct io_buffer *head;
3107
3108 if (req->flags & REQ_F_BUFFER_SELECTED)
3109 return kbuf;
3110
3111 io_ring_submit_lock(req->ctx, needs_lock);
3112
3113 lockdep_assert_held(&req->ctx->uring_lock);
3114
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003115 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07003116 if (head) {
3117 if (!list_empty(&head->list)) {
3118 kbuf = list_last_entry(&head->list, struct io_buffer,
3119 list);
3120 list_del(&kbuf->list);
3121 } else {
3122 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003123 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07003124 }
3125 if (*len > kbuf->len)
3126 *len = kbuf->len;
3127 } else {
3128 kbuf = ERR_PTR(-ENOBUFS);
3129 }
3130
3131 io_ring_submit_unlock(req->ctx, needs_lock);
3132
3133 return kbuf;
3134}
3135
Jens Axboe4d954c22020-02-27 07:31:19 -07003136static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3137 bool needs_lock)
3138{
3139 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003140 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07003141
3142 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003143 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07003144 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3145 if (IS_ERR(kbuf))
3146 return kbuf;
3147 req->rw.addr = (u64) (unsigned long) kbuf;
3148 req->flags |= REQ_F_BUFFER_SELECTED;
3149 return u64_to_user_ptr(kbuf->addr);
3150}
3151
3152#ifdef CONFIG_COMPAT
3153static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3154 bool needs_lock)
3155{
3156 struct compat_iovec __user *uiov;
3157 compat_ssize_t clen;
3158 void __user *buf;
3159 ssize_t len;
3160
3161 uiov = u64_to_user_ptr(req->rw.addr);
3162 if (!access_ok(uiov, sizeof(*uiov)))
3163 return -EFAULT;
3164 if (__get_user(clen, &uiov->iov_len))
3165 return -EFAULT;
3166 if (clen < 0)
3167 return -EINVAL;
3168
3169 len = clen;
3170 buf = io_rw_buffer_select(req, &len, needs_lock);
3171 if (IS_ERR(buf))
3172 return PTR_ERR(buf);
3173 iov[0].iov_base = buf;
3174 iov[0].iov_len = (compat_size_t) len;
3175 return 0;
3176}
3177#endif
3178
3179static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3180 bool needs_lock)
3181{
3182 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3183 void __user *buf;
3184 ssize_t len;
3185
3186 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3187 return -EFAULT;
3188
3189 len = iov[0].iov_len;
3190 if (len < 0)
3191 return -EINVAL;
3192 buf = io_rw_buffer_select(req, &len, needs_lock);
3193 if (IS_ERR(buf))
3194 return PTR_ERR(buf);
3195 iov[0].iov_base = buf;
3196 iov[0].iov_len = len;
3197 return 0;
3198}
3199
3200static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3201 bool needs_lock)
3202{
Jens Axboedddb3e22020-06-04 11:27:01 -06003203 if (req->flags & REQ_F_BUFFER_SELECTED) {
3204 struct io_buffer *kbuf;
3205
3206 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3207 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3208 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003209 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003210 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00003211 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07003212 return -EINVAL;
3213
3214#ifdef CONFIG_COMPAT
3215 if (req->ctx->compat)
3216 return io_compat_import(req, iov, needs_lock);
3217#endif
3218
3219 return __io_iov_buffer_select(req, iov, needs_lock);
3220}
3221
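/*
 * Build the iov_iter for a read/write request: fixed buffers, provided
 * (selected) buffers, the inline single-range READ/WRITE case, or a
 * full iovec array imported from userspace.
 */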
Pavel Begunkov847595d2021-02-04 13:52:06 +00003222static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3223 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003224{
Jens Axboe9adbd452019-12-20 08:45:55 -07003225 void __user *buf = u64_to_user_ptr(req->rw.addr);
3226 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003227 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003228 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003229
Pavel Begunkov7d009162019-11-25 23:14:40 +03003230 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003231 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003232 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003233 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003234
Jens Axboebcda7ba2020-02-23 16:42:51 -07003235 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003236 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003237 return -EINVAL;
3238
Jens Axboe3a6820f2019-12-22 15:19:35 -07003239 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003240 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003241 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003242 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003243 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003244 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003245 }
3246
Jens Axboe3a6820f2019-12-22 15:19:35 -07003247 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3248 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003249 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003250 }
3251
Jens Axboe4d954c22020-02-27 07:31:19 -07003252 if (req->flags & REQ_F_BUFFER_SELECT) {
3253 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003254 if (!ret)
3255 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003256 *iovec = NULL;
3257 return ret;
3258 }
3259
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003260 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3261 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003262}
3263
Jens Axboe0fef9482020-08-26 10:36:20 -06003264static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3265{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003266 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003267}
3268
Jens Axboe32960612019-09-23 11:05:34 -06003269/*
3270 * For files that don't have ->read_iter() and ->write_iter(), handle them
3271 * by looping over ->read() or ->write() manually.
3272 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003273static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003274{
Jens Axboe4017eb92020-10-22 14:14:12 -06003275 struct kiocb *kiocb = &req->rw.kiocb;
3276 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003277 ssize_t ret = 0;
3278
3279 /*
3280 * Don't support polled IO through this interface, and we can't
3281 * support non-blocking either. For the latter, this just causes
3282 * the kiocb to be handled from an async context.
3283 */
3284 if (kiocb->ki_flags & IOCB_HIPRI)
3285 return -EOPNOTSUPP;
3286 if (kiocb->ki_flags & IOCB_NOWAIT)
3287 return -EAGAIN;
3288
3289 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003290 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003291 ssize_t nr;
3292
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003293 if (!iov_iter_is_bvec(iter)) {
3294 iovec = iov_iter_iovec(iter);
3295 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003296 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3297 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003298 }
3299
Jens Axboe32960612019-09-23 11:05:34 -06003300 if (rw == READ) {
3301 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003302 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003303 } else {
3304 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003305 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003306 }
3307
3308 if (nr < 0) {
3309 if (!ret)
3310 ret = nr;
3311 break;
3312 }
Jens Axboe109dda42022-03-18 11:28:13 -06003313 ret += nr;
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003314 if (!iov_iter_is_bvec(iter)) {
3315 iov_iter_advance(iter, nr);
3316 } else {
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003317 req->rw.addr += nr;
Jens Axboe109dda42022-03-18 11:28:13 -06003318 req->rw.len -= nr;
3319 if (!req->rw.len)
3320 break;
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003321 }
Jens Axboe32960612019-09-23 11:05:34 -06003322 if (nr != iovec.iov_len)
3323 break;
Jens Axboe32960612019-09-23 11:05:34 -06003324 }
3325
3326 return ret;
3327}
3328
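/*
 * Copy the iterator state into the request's async data so the IO can
 * be retried later from a different context. Inline iovecs are copied
 * to the request's fast_iov; caller-allocated iovecs are recorded in
 * ->free_iovec for cleanup.
 */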
Jens Axboeff6165b2020-08-13 09:47:43 -06003329static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3330 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003331{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003332 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003333
Jens Axboeff6165b2020-08-13 09:47:43 -06003334 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003335 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003336 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003337 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003338 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003339 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003340 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003341 unsigned iov_off = 0;
3342
3343 rw->iter.iov = rw->fast_iov;
3344 if (iter->iov != fast_iov) {
3345 iov_off = iter->iov - fast_iov;
3346 rw->iter.iov += iov_off;
3347 }
3348 if (rw->fast_iov != fast_iov)
3349 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003350 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003351 } else {
3352 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003353 }
3354}
3355
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003356static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003357{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003358 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3359 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3360 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003361}
3362
Jens Axboeff6165b2020-08-13 09:47:43 -06003363static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3364 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003365 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003366{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003367 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003368 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003369 if (!req->async_data) {
Jens Axboecd658692021-09-10 11:19:14 -06003370 struct io_async_rw *iorw;
3371
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003372 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003373 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003374 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003375 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003376
Jens Axboeff6165b2020-08-13 09:47:43 -06003377 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboecd658692021-09-10 11:19:14 -06003378 iorw = req->async_data;
3379 /* we've copied and mapped the iter, ensure state is saved */
3380 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003381 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003382 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003383}
3384
Pavel Begunkov73debe62020-09-30 22:57:54 +03003385static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003386{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003387 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003388 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003389 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003390
Pavel Begunkov2846c482020-11-07 13:16:27 +00003391 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003392 if (unlikely(ret < 0))
3393 return ret;
3394
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003395 iorw->bytes_done = 0;
3396 iorw->free_iovec = iov;
3397 if (iov)
3398 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboecd658692021-09-10 11:19:14 -06003399 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003400 return 0;
3401}
3402
Pavel Begunkov73debe62020-09-30 22:57:54 +03003403static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003404{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003405 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3406 return -EBADF;
Jens Axboe5d329e12021-09-14 11:08:37 -06003407 return io_prep_rw(req, sqe, READ);
Jens Axboef67676d2019-12-02 11:03:47 -07003408}
3409
Jens Axboec1dd91d2020-08-03 16:43:59 -06003410/*
3411 * This is our waitqueue callback handler, registered through lock_page_async()
3412 * when we initially tried to do the IO with the iocb, arming our waitqueue.
3413 * This gets called when the page is unlocked, and we generally expect that to
3414 * happen when the page IO is completed and the page is now uptodate. This will
3415 * queue a task_work based retry of the operation, attempting to copy the data
3416 * again. If the latter fails because the page was NOT uptodate, then we will
3417 * do a thread based blocking retry of the operation. That's the unexpected
3418 * slow path.
3419 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003420static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3421 int sync, void *arg)
3422{
3423 struct wait_page_queue *wpq;
3424 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003425 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003426
3427 wpq = container_of(wait, struct wait_page_queue, wait);
3428
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003429 if (!wake_page_match(wpq, key))
3430 return 0;
3431
Hao Xuc8d317a2020-09-29 20:00:45 +08003432 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003433 list_del_init(&wait->entry);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003434 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003435 return 1;
3436}
3437
Jens Axboec1dd91d2020-08-03 16:43:59 -06003438/*
3439 * This controls whether a given IO request should be armed for async page
3440 * based retry. If we return false here, the request is handed to the async
3441 * worker threads for retry. If we're doing buffered reads on a regular file,
3442 * we prepare a private wait_page_queue entry and retry the operation. This
3443 * will either succeed because the page is now uptodate and unlocked, or it
3444 * will register a callback when the page is unlocked at IO completion. Through
3445 * that callback, io_uring uses task_work to setup a retry of the operation.
3446 * That retry will attempt the buffered read again. The retry will generally
3447 * succeed, or in rare cases where it fails, we then fall back to using the
3448 * async worker threads for a blocking retry.
3449 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003450static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003451{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003452 struct io_async_rw *rw = req->async_data;
3453 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003454 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003455
3456 /* never retry for NOWAIT, we just complete with -EAGAIN */
3457 if (req->flags & REQ_F_NOWAIT)
3458 return false;
3459
Jens Axboe227c0c92020-08-13 11:51:40 -06003460 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003461 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003462 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003463
Jens Axboebcf5a062020-05-22 09:24:42 -06003464 /*
3465 * just use poll if we can, and don't attempt if the fs doesn't
3466 * support callback based unlocks
3467 */
3468 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3469 return false;
3470
Jens Axboe3b2a4432020-08-16 10:58:43 -07003471 wait->wait.func = io_async_buf_func;
3472 wait->wait.private = req;
3473 wait->wait.flags = 0;
3474 INIT_LIST_HEAD(&wait->wait.entry);
3475 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003476 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003477 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003478 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003479}
3480
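/*
 * Issue the actual read: prefer the file's ->read_iter(), fall back to
 * looping ->read() for legacy file ops, else -EINVAL.
 */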
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003481static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003482{
3483 if (req->file->f_op->read_iter)
3484 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003485 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003486 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003487 else
3488 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003489}
3490
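/*
 * Regular files and block devices have a known size, so a short read
 * there must be retried until the request is satisfied; other file
 * types (sockets, pipes) may legitimately return short reads.
 */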
Ming Lei7db30432021-08-21 23:07:51 +08003491static bool need_read_all(struct io_kiocb *req)
3492{
3493 return req->flags & REQ_F_ISREG ||
3494 S_ISBLK(file_inode(req->file)->i_mode);
3495}
3496
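/*
 * Core read path: import (or restore) the iovec/iter, attempt the read
 * (nonblocking first if requested), then handle -EAGAIN and short reads
 * by either punting to io-wq or arming the page-unlock retry and
 * looping, advancing the iter past the bytes already consumed.
 */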
Pavel Begunkov889fca72021-02-10 00:03:09 +00003497static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003498{
3499 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003500 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003501 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003502 struct io_async_rw *rw = req->async_data;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003503 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboecd658692021-09-10 11:19:14 -06003504 struct iov_iter_state __state, *state;
3505 ssize_t ret, ret2;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003506
Pavel Begunkov2846c482020-11-07 13:16:27 +00003507 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003508 iter = &rw->iter;
Jens Axboecd658692021-09-10 11:19:14 -06003509 state = &rw->iter_state;
3510 /*
3511 * We come here from an earlier attempt; restore our state so it
3512 * matches in case it doesn't already. It's cheap enough that we don't
3513 * need to make this conditional.
3514 */
3515 iov_iter_restore(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003516 iovec = NULL;
3517 } else {
3518 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3519 if (ret < 0)
3520 return ret;
Jens Axboecd658692021-09-10 11:19:14 -06003521 state = &__state;
3522 iov_iter_save_state(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003523 }
Jens Axboecd658692021-09-10 11:19:14 -06003524 req->result = iov_iter_count(iter);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003525
Jens Axboefd6c2e42019-12-18 12:19:41 -07003526 /* Ensure we clear previously set non-block flag */
3527 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003528 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003529 else
3530 kiocb->ki_flags |= IOCB_NOWAIT;
3531
Pavel Begunkov24c74672020-06-21 13:09:51 +03003532 /* If the file doesn't support async, just async punt */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003533 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003534 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003535 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003536 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003537
Jens Axboecd658692021-09-10 11:19:14 -06003538 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003539 if (unlikely(ret)) {
3540 kfree(iovec);
3541 return ret;
3542 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003543
Jens Axboe227c0c92020-08-13 11:51:40 -06003544 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003545
Jens Axboe230d50d2021-04-01 20:41:15 -06003546 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003547 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003548 /* IOPOLL retry should happen for io-wq threads */
3549 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003550 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003551 /* no retry on NONBLOCK or RWF_NOWAIT */
3552 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003553 goto done;
Jens Axboef38c7e32020-09-25 15:23:43 -06003554 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003555 } else if (ret == -EIOCBQUEUED) {
3556 goto out_free;
Jens Axboecd658692021-09-10 11:19:14 -06003557 } else if (ret <= 0 || ret == req->result || !force_nonblock ||
Ming Lei7db30432021-08-21 23:07:51 +08003558 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003559 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003560 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003561 }
3562
Jens Axboecd658692021-09-10 11:19:14 -06003563 /*
3564 * Don't depend on the iter state matching what was consumed, or being
3565 * untouched in case of error. Restore it and we'll advance it
3566 * manually if we need to.
3567 */
3568 iov_iter_restore(iter, state);
3569
Jens Axboe227c0c92020-08-13 11:51:40 -06003570 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003571 if (ret2)
3572 return ret2;
3573
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003574 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003575 rw = req->async_data;
Jens Axboecd658692021-09-10 11:19:14 -06003576 /*
3577 * Now use our persistent iterator and state, if we aren't already.
3578 * We've restored and mapped the iter to match.
3579 */
3580 if (iter != &rw->iter) {
3581 iter = &rw->iter;
3582 state = &rw->iter_state;
3583 }
Jens Axboe227c0c92020-08-13 11:51:40 -06003584
Pavel Begunkovb23df912021-02-04 13:52:04 +00003585 do {
Jens Axboecd658692021-09-10 11:19:14 -06003586 /*
3587 * We end up here because of a partial read, either from
3588 * above or inside this loop. Advance the iter by the bytes
3589 * that were consumed.
3590 */
3591 iov_iter_advance(iter, ret);
3592 if (!iov_iter_count(iter))
3593 break;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003594 rw->bytes_done += ret;
Jens Axboecd658692021-09-10 11:19:14 -06003595 iov_iter_save_state(iter, state);
3596
Pavel Begunkovb23df912021-02-04 13:52:04 +00003597 /* if we can retry, do so with the callbacks armed */
3598 if (!io_rw_should_retry(req)) {
3599 kiocb->ki_flags &= ~IOCB_WAITQ;
3600 return -EAGAIN;
3601 }
3602
3603 /*
3604 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3605 * we get -EIOCBQUEUED, then we'll get a notification when the
3606 * desired page gets unlocked. We can also get a partial read
3607 * here, and if we do, then just retry at the new offset.
3608 */
3609 ret = io_iter_do_read(req, iter);
3610 if (ret == -EIOCBQUEUED)
3611 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003612 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003613 kiocb->ki_flags &= ~IOCB_WAITQ;
Jens Axboecd658692021-09-10 11:19:14 -06003614 iov_iter_restore(iter, state);
3615 } while (ret > 0);
Jens Axboe227c0c92020-08-13 11:51:40 -06003616done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003617 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003618out_free:
3619 /* it's faster to check here than delegate to kfree */
3620 if (iovec)
3621 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003622 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003623}
3624
Pavel Begunkov73debe62020-09-30 22:57:54 +03003625static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003626{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003627 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3628 return -EBADF;
Jens Axboe5d329e12021-09-14 11:08:37 -06003629 return io_prep_rw(req, sqe, WRITE);
Jens Axboef67676d2019-12-02 11:03:47 -07003630}
3631
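/*
 * Core write path. Unlike reads there is no page-unlock based retry:
 * anything that can't complete nonblocking goes through copy_iov, which
 * snapshots the iter into async data and punts to io-wq for a blocking
 * attempt.
 */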
Pavel Begunkov889fca72021-02-10 00:03:09 +00003632static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003633{
3634 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003635 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003636 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003637 struct io_async_rw *rw = req->async_data;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003638 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboecd658692021-09-10 11:19:14 -06003639 struct iov_iter_state __state, *state;
3640 ssize_t ret, ret2;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003641
Pavel Begunkov2846c482020-11-07 13:16:27 +00003642 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003643 iter = &rw->iter;
Jens Axboecd658692021-09-10 11:19:14 -06003644 state = &rw->iter_state;
3645 iov_iter_restore(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003646 iovec = NULL;
3647 } else {
3648 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3649 if (ret < 0)
3650 return ret;
Jens Axboecd658692021-09-10 11:19:14 -06003651 state = &__state;
3652 iov_iter_save_state(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003653 }
Jens Axboecd658692021-09-10 11:19:14 -06003654 req->result = iov_iter_count(iter);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003655
Jens Axboefd6c2e42019-12-18 12:19:41 -07003656 /* Ensure we clear previously set non-block flag */
3657 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003658 kiocb->ki_flags &= ~IOCB_NOWAIT;
3659 else
3660 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003661
Pavel Begunkov24c74672020-06-21 13:09:51 +03003662 /* If the file doesn't support async, just async punt */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003663 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003664 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003665
Jens Axboe10d59342019-12-09 20:16:22 -07003666 /* file path doesn't support NOWAIT for non-direct_IO */
3667 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3668 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003669 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003670
Jens Axboecd658692021-09-10 11:19:14 -06003671 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003672 if (unlikely(ret))
3673 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003674
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003675 /*
3676 * Open-code file_start_write here to grab freeze protection,
3677 * which will be released by another thread in
3678 * io_complete_rw(). Fool lockdep by telling it the lock got
3679 * released so that it doesn't complain about the held lock when
3680 * we return to userspace.
3681 */
3682 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003683 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003684 __sb_writers_release(file_inode(req->file)->i_sb,
3685 SB_FREEZE_WRITE);
3686 }
3687 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003688
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003689 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003690 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003691 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003692 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003693 else
3694 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003695
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003696 if (req->flags & REQ_F_REISSUE) {
3697 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003698 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003699 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003700
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003701 /*
3702 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3703 * retry them without IOCB_NOWAIT.
3704 */
3705 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3706 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003707 /* no retry on NONBLOCK or RWF_NOWAIT */
3708 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003709 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003710 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003711 /* IOPOLL retry should happen for io-wq threads */
3712 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3713 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003714done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003715 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003716 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003717copy_iov:
Jens Axboecd658692021-09-10 11:19:14 -06003718 iov_iter_restore(iter, state);
Jens Axboe227c0c92020-08-13 11:51:40 -06003719 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Jens Axboe295219a2022-08-25 10:19:08 -06003720 if (!ret) {
3721 if (kiocb->ki_flags & IOCB_WRITE)
3722 kiocb_end_write(req);
3723 return -EAGAIN;
3724 }
3725 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003726 }
Jens Axboe31b51512019-01-18 22:56:34 -07003727out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003728 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003729 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003730 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003731 return ret;
3732}
3733
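/*
 * The fs-operation opcodes below share a common shape: the _prep
 * handler validates the SQE and copies userspace strings with
 * getname() at submission time, while the issue handler only runs from
 * a blocking context, returning -EAGAIN when invoked nonblocking.
 */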
Jens Axboe80a261f2020-09-28 14:23:58 -06003734static int io_renameat_prep(struct io_kiocb *req,
3735 const struct io_uring_sqe *sqe)
3736{
3737 struct io_rename *ren = &req->rename;
3738 const char __user *oldf, *newf;
3739
Jens Axboeed7eb252021-06-23 09:04:13 -06003740 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3741 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003742 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeed7eb252021-06-23 09:04:13 -06003743 return -EINVAL;
Jens Axboe80a261f2020-09-28 14:23:58 -06003744 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3745 return -EBADF;
3746
3747 ren->old_dfd = READ_ONCE(sqe->fd);
3748 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3749 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3750 ren->new_dfd = READ_ONCE(sqe->len);
3751 ren->flags = READ_ONCE(sqe->rename_flags);
3752
3753 ren->oldpath = getname(oldf);
3754 if (IS_ERR(ren->oldpath))
3755 return PTR_ERR(ren->oldpath);
3756
3757 ren->newpath = getname(newf);
3758 if (IS_ERR(ren->newpath)) {
3759 putname(ren->oldpath);
3760 return PTR_ERR(ren->newpath);
3761 }
3762
3763 req->flags |= REQ_F_NEED_CLEANUP;
3764 return 0;
3765}
3766
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003767static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003768{
3769 struct io_rename *ren = &req->rename;
3770 int ret;
3771
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003772 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003773 return -EAGAIN;
3774
3775 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3776 ren->newpath, ren->flags);
3777
3778 req->flags &= ~REQ_F_NEED_CLEANUP;
3779 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003780 req_set_fail(req);
Jens Axboe80a261f2020-09-28 14:23:58 -06003781 io_req_complete(req, ret);
3782 return 0;
3783}
3784
Jens Axboe14a11432020-09-28 14:27:37 -06003785static int io_unlinkat_prep(struct io_kiocb *req,
3786 const struct io_uring_sqe *sqe)
3787{
3788 struct io_unlink *un = &req->unlink;
3789 const char __user *fname;
3790
Jens Axboe22634bc2021-06-23 09:07:45 -06003791 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3792 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003793 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
3794 sqe->splice_fd_in)
Jens Axboe22634bc2021-06-23 09:07:45 -06003795 return -EINVAL;
Jens Axboe14a11432020-09-28 14:27:37 -06003796 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3797 return -EBADF;
3798
3799 un->dfd = READ_ONCE(sqe->fd);
3800
3801 un->flags = READ_ONCE(sqe->unlink_flags);
3802 if (un->flags & ~AT_REMOVEDIR)
3803 return -EINVAL;
3804
3805 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3806 un->filename = getname(fname);
3807 if (IS_ERR(un->filename))
3808 return PTR_ERR(un->filename);
3809
3810 req->flags |= REQ_F_NEED_CLEANUP;
3811 return 0;
3812}
3813
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003814static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003815{
3816 struct io_unlink *un = &req->unlink;
3817 int ret;
3818
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003819 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003820 return -EAGAIN;
3821
3822 if (un->flags & AT_REMOVEDIR)
3823 ret = do_rmdir(un->dfd, un->filename);
3824 else
3825 ret = do_unlinkat(un->dfd, un->filename);
3826
3827 req->flags &= ~REQ_F_NEED_CLEANUP;
3828 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003829 req_set_fail(req);
Jens Axboe14a11432020-09-28 14:27:37 -06003830 io_req_complete(req, ret);
3831 return 0;
3832}
3833
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07003834static int io_mkdirat_prep(struct io_kiocb *req,
3835 const struct io_uring_sqe *sqe)
3836{
3837 struct io_mkdir *mkd = &req->mkdir;
3838 const char __user *fname;
3839
3840 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3841 return -EINVAL;
3842 if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
3843 sqe->splice_fd_in)
3844 return -EINVAL;
3845 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3846 return -EBADF;
3847
3848 mkd->dfd = READ_ONCE(sqe->fd);
3849 mkd->mode = READ_ONCE(sqe->len);
3850
3851 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3852 mkd->filename = getname(fname);
3853 if (IS_ERR(mkd->filename))
3854 return PTR_ERR(mkd->filename);
3855
3856 req->flags |= REQ_F_NEED_CLEANUP;
3857 return 0;
3858}
3859
3860static int io_mkdirat(struct io_kiocb *req, int issue_flags)
3861{
3862 struct io_mkdir *mkd = &req->mkdir;
3863 int ret;
3864
3865 if (issue_flags & IO_URING_F_NONBLOCK)
3866 return -EAGAIN;
3867
3868 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
3869
3870 req->flags &= ~REQ_F_NEED_CLEANUP;
3871 if (ret < 0)
3872 req_set_fail(req);
3873 io_req_complete(req, ret);
3874 return 0;
3875}
3876
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07003877static int io_symlinkat_prep(struct io_kiocb *req,
3878 const struct io_uring_sqe *sqe)
3879{
3880 struct io_symlink *sl = &req->symlink;
3881 const char __user *oldpath, *newpath;
3882
3883 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3884 return -EINVAL;
3885 if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
3886 sqe->splice_fd_in)
3887 return -EINVAL;
3888 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3889 return -EBADF;
3890
3891 sl->new_dfd = READ_ONCE(sqe->fd);
3892 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
3893 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3894
3895 sl->oldpath = getname(oldpath);
3896 if (IS_ERR(sl->oldpath))
3897 return PTR_ERR(sl->oldpath);
3898
3899 sl->newpath = getname(newpath);
3900 if (IS_ERR(sl->newpath)) {
3901 putname(sl->oldpath);
3902 return PTR_ERR(sl->newpath);
3903 }
3904
3905 req->flags |= REQ_F_NEED_CLEANUP;
3906 return 0;
3907}
3908
3909static int io_symlinkat(struct io_kiocb *req, int issue_flags)
3910{
3911 struct io_symlink *sl = &req->symlink;
3912 int ret;
3913
3914 if (issue_flags & IO_URING_F_NONBLOCK)
3915 return -EAGAIN;
3916
3917 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
3918
3919 req->flags &= ~REQ_F_NEED_CLEANUP;
3920 if (ret < 0)
3921 req_set_fail(req);
3922 io_req_complete(req, ret);
3923 return 0;
3924}
3925
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07003926static int io_linkat_prep(struct io_kiocb *req,
3927 const struct io_uring_sqe *sqe)
3928{
3929 struct io_hardlink *lnk = &req->hardlink;
3930 const char __user *oldf, *newf;
3931
3932 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3933 return -EINVAL;
3934 if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
3935 return -EINVAL;
3936 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3937 return -EBADF;
3938
3939 lnk->old_dfd = READ_ONCE(sqe->fd);
3940 lnk->new_dfd = READ_ONCE(sqe->len);
3941 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3942 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3943 lnk->flags = READ_ONCE(sqe->hardlink_flags);
3944
3945 lnk->oldpath = getname(oldf);
3946 if (IS_ERR(lnk->oldpath))
3947 return PTR_ERR(lnk->oldpath);
3948
3949 lnk->newpath = getname(newf);
3950 if (IS_ERR(lnk->newpath)) {
3951 putname(lnk->oldpath);
3952 return PTR_ERR(lnk->newpath);
3953 }
3954
3955 req->flags |= REQ_F_NEED_CLEANUP;
3956 return 0;
3957}
3958
3959static int io_linkat(struct io_kiocb *req, int issue_flags)
3960{
3961 struct io_hardlink *lnk = &req->hardlink;
3962 int ret;
3963
3964 if (issue_flags & IO_URING_F_NONBLOCK)
3965 return -EAGAIN;
3966
3967 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
3968 lnk->newpath, lnk->flags);
3969
3970 req->flags &= ~REQ_F_NEED_CLEANUP;
3971 if (ret < 0)
3972 req_set_fail(req);
3973 io_req_complete(req, ret);
3974 return 0;
3975}
3976
Jens Axboe36f4fa62020-09-05 11:14:22 -06003977static int io_shutdown_prep(struct io_kiocb *req,
3978 const struct io_uring_sqe *sqe)
3979{
3980#if defined(CONFIG_NET)
3981 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3982 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003983 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3984 sqe->buf_index || sqe->splice_fd_in))
Jens Axboe36f4fa62020-09-05 11:14:22 -06003985 return -EINVAL;
3986
3987 req->shutdown.how = READ_ONCE(sqe->len);
3988 return 0;
3989#else
3990 return -EOPNOTSUPP;
3991#endif
3992}
3993
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003994static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003995{
3996#if defined(CONFIG_NET)
3997 struct socket *sock;
3998 int ret;
3999
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004000 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06004001 return -EAGAIN;
4002
Linus Torvalds48aba792020-12-16 12:44:05 -08004003 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06004004 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08004005 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06004006
4007 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07004008 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004009 req_set_fail(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06004010 io_req_complete(req, ret);
4011 return 0;
4012#else
4013 return -EOPNOTSUPP;
4014#endif
4015}
4016
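/*
 * splice/tee: only the input fd number is recorded at prep time; the
 * file itself is looked up via io_file_get() at issue time, so no
 * reference is held across an async punt.
 */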
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004017static int __io_splice_prep(struct io_kiocb *req,
4018 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004019{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01004020 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004021 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004022
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004023 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4024 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004025
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004026 sp->len = READ_ONCE(sqe->len);
4027 sp->flags = READ_ONCE(sqe->splice_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004028 if (unlikely(sp->flags & ~valid_flags))
4029 return -EINVAL;
Jens Axboeae6cba32022-03-29 10:59:20 -06004030 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004031 return 0;
4032}
4033
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004034static int io_tee_prep(struct io_kiocb *req,
4035 const struct io_uring_sqe *sqe)
4036{
4037 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
4038 return -EINVAL;
4039 return __io_splice_prep(req, sqe);
4040}
4041
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004042static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004043{
4044 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004045 struct file *out = sp->file_out;
4046 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
Jens Axboeae6cba32022-03-29 10:59:20 -06004047 struct file *in;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004048 long ret = 0;
4049
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004050 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004051 return -EAGAIN;
Jens Axboeae6cba32022-03-29 10:59:20 -06004052
4053 in = io_file_get(req->ctx, req, sp->splice_fd_in,
4054 (sp->flags & SPLICE_F_FD_IN_FIXED));
4055 if (!in) {
4056 ret = -EBADF;
4057 goto done;
4058 }
4059
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004060 if (sp->len)
4061 ret = do_tee(in, out, sp->len, flags);
4062
Pavel Begunkove1d767f2021-03-19 17:22:43 +00004063 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4064 io_put_file(in);
Jens Axboeae6cba32022-03-29 10:59:20 -06004065done:
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004066 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004067 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004068 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004069 return 0;
4070}
4071
4072static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4073{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01004074 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004075
4076 sp->off_in = READ_ONCE(sqe->splice_off_in);
4077 sp->off_out = READ_ONCE(sqe->off);
4078 return __io_splice_prep(req, sqe);
4079}
4080
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004081static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004082{
4083 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004084 struct file *out = sp->file_out;
4085 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4086 loff_t *poff_in, *poff_out;
Jens Axboeae6cba32022-03-29 10:59:20 -06004087 struct file *in;
Pavel Begunkovc9687422020-05-04 23:00:54 +03004088 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004089
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004090 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03004091 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004092
Jens Axboeae6cba32022-03-29 10:59:20 -06004093 in = io_file_get(req->ctx, req, sp->splice_fd_in,
4094 (sp->flags & SPLICE_F_FD_IN_FIXED));
4095 if (!in) {
4096 ret = -EBADF;
4097 goto done;
4098 }
4099
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004100 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
4101 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03004102
Jens Axboe948a7742020-05-17 14:21:38 -06004103 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03004104 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004105
Pavel Begunkove1d767f2021-03-19 17:22:43 +00004106 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4107 io_put_file(in);
Jens Axboeae6cba32022-03-29 10:59:20 -06004108done:
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004109 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004110 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004111 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004112 return 0;
4113}
4114
Jens Axboe2b188cc2019-01-07 10:46:33 -07004115/*
4116 * IORING_OP_NOP just posts a completion event, nothing else.
4117 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00004118static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004119{
4120 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004121
Jens Axboedef596e2019-01-09 08:59:42 -07004122 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4123 return -EINVAL;
4124
Pavel Begunkov889fca72021-02-10 00:03:09 +00004125 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004126 return 0;
4127}
4128
Pavel Begunkov1155c762021-02-18 18:29:38 +00004129static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004130{
Jens Axboe6b063142019-01-10 22:13:58 -07004131 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004132
Jens Axboe6b063142019-01-10 22:13:58 -07004133 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07004134 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004135 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4136 sqe->splice_fd_in))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004137 return -EINVAL;
4138
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004139 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4140 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4141 return -EINVAL;
4142
4143 req->sync.off = READ_ONCE(sqe->off);
4144 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004145 return 0;
4146}
4147
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004148static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07004149{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004150 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004151 int ret;
4152
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004153 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004154 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004155 return -EAGAIN;
4156
Jens Axboe9adbd452019-12-20 08:45:55 -07004157 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004158 end > 0 ? end : LLONG_MAX,
4159 req->sync.flags & IORING_FSYNC_DATASYNC);
4160 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004161 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004162 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004163 return 0;
4164}
4165
Jens Axboed63d1b52019-12-10 10:38:56 -07004166static int io_fallocate_prep(struct io_kiocb *req,
4167 const struct io_uring_sqe *sqe)
4168{
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004169 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4170 sqe->splice_fd_in)
Jens Axboed63d1b52019-12-10 10:38:56 -07004171 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004172 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4173 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07004174
4175 req->sync.off = READ_ONCE(sqe->off);
4176 req->sync.len = READ_ONCE(sqe->addr);
4177 req->sync.mode = READ_ONCE(sqe->len);
4178 return 0;
4179}
4180
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004181static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07004182{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004183 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07004184
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004185 /* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004186 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004187 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004188 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4189 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004190 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004191 req_set_fail(req);
Jens Axboedf1ec532022-03-20 13:08:38 -06004192 else
4193 fsnotify_modify(req->file);
Jens Axboee1e16092020-06-22 09:17:17 -06004194 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07004195 return 0;
4196}
4197
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004198static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004199{
Jens Axboef8748882020-01-08 17:47:02 -07004200 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004201 int ret;
4202
Pavel Begunkovd3fddf62021-08-09 13:04:16 +01004203 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4204 return -EINVAL;
Pavel Begunkovb9445592021-08-25 12:25:45 +01004205 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07004206 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004207 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07004208 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004209
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004210 /* open.how should already be initialised */
4211 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06004212 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004213
Pavel Begunkov25e72d12020-06-03 18:03:23 +03004214 req->open.dfd = READ_ONCE(sqe->fd);
4215 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07004216 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004217 if (IS_ERR(req->open.filename)) {
4218 ret = PTR_ERR(req->open.filename);
4219 req->open.filename = NULL;
4220 return ret;
4221 }
Pavel Begunkovb9445592021-08-25 12:25:45 +01004222
4223 req->open.file_slot = READ_ONCE(sqe->file_index);
4224 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4225 return -EINVAL;
4226
Jens Axboe4022e7a2020-03-19 19:23:18 -06004227 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004228 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004229 return 0;
4230}
4231
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004232static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4233{
Pavel Begunkovd3fddf62021-08-09 13:04:16 +01004234 u64 mode = READ_ONCE(sqe->len);
4235 u64 flags = READ_ONCE(sqe->open_flags);
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004236
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004237 req->open.how = build_open_how(flags, mode);
4238 return __io_openat_prep(req, sqe);
4239}
4240
Jens Axboecebdb982020-01-08 17:59:24 -07004241static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4242{
4243 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07004244 size_t len;
4245 int ret;
4246
Jens Axboecebdb982020-01-08 17:59:24 -07004247 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4248 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07004249 if (len < OPEN_HOW_SIZE_VER0)
4250 return -EINVAL;
4251
4252 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4253 len);
4254 if (ret)
4255 return ret;
4256
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004257 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07004258}
4259
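/*
 * Nonblocking-first open: force O_NONBLOCK and LOOKUP_CACHED for the
 * initial attempt and only punt to io-wq if the path isn't fully
 * cached (unless the application itself asked for RESOLVE_CACHED).
 */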
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004260static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004261{
4262 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004263 struct file *file;
Pavel Begunkovb9445592021-08-25 12:25:45 +01004264 bool resolve_nonblock, nonblock_set;
4265 bool fixed = !!req->open.file_slot;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004266 int ret;
4267
Jens Axboecebdb982020-01-08 17:59:24 -07004268 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004269 if (ret)
4270 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004271 nonblock_set = op.open_flag & O_NONBLOCK;
4272 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004273 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004274 /*
4275 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
4276 * it'll always return -EAGAIN
4277 */
4278 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4279 return -EAGAIN;
4280 op.lookup_flags |= LOOKUP_CACHED;
4281 op.open_flag |= O_NONBLOCK;
4282 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004283
Pavel Begunkovb9445592021-08-25 12:25:45 +01004284 if (!fixed) {
4285 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4286 if (ret < 0)
4287 goto err;
4288 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004289
4290 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004291 if (IS_ERR(file)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004292 /*
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004293 * We could hang on to this 'fd' on retrying, but seems like
4294 * marginal gain for something that is now known to be a slower
4295 * path. So just put it, and we'll get a new one when we retry.
Jens Axboe3a81fd02020-12-10 12:25:36 -07004296 */
Pavel Begunkovb9445592021-08-25 12:25:45 +01004297 if (!fixed)
4298 put_unused_fd(ret);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004299
4300 ret = PTR_ERR(file);
4301 /* only retry if RESOLVE_CACHED wasn't already set by application */
4302 if (ret == -EAGAIN &&
4303 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
4304 return -EAGAIN;
4305 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004306 }
4307
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004308 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4309 file->f_flags &= ~O_NONBLOCK;
4310 fsnotify_open(file);
Pavel Begunkovb9445592021-08-25 12:25:45 +01004311
4312 if (!fixed)
4313 fd_install(ret, file);
4314 else
4315 ret = io_install_fixed_file(req, file, issue_flags,
4316 req->open.file_slot - 1);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004317err:
4318 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004319 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004320 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004321 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004322 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004323 return 0;
4324}
4325
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004326static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07004327{
Pavel Begunkove45cff52021-02-28 22:35:14 +00004328 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07004329}
4330
Jens Axboe067524e2020-03-02 16:32:28 -07004331static int io_remove_buffers_prep(struct io_kiocb *req,
4332 const struct io_uring_sqe *sqe)
4333{
4334 struct io_provide_buf *p = &req->pbuf;
4335 u64 tmp;
4336
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004337 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4338 sqe->splice_fd_in)
Jens Axboe067524e2020-03-02 16:32:28 -07004339 return -EINVAL;
4340
4341 tmp = READ_ONCE(sqe->fd);
4342 if (!tmp || tmp > USHRT_MAX)
4343 return -EINVAL;
4344
4345 memset(p, 0, sizeof(*p));
4346 p->nbufs = tmp;
4347 p->bgid = READ_ONCE(sqe->buf_group);
4348 return 0;
4349}
4350
4351static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4352 int bgid, unsigned nbufs)
4353{
4354 unsigned i = 0;
4355
4356 /* shouldn't happen */
4357 if (!nbufs)
4358 return 0;
4359
4360 /* the head kbuf is the list itself */
4361 while (!list_empty(&buf->list)) {
4362 struct io_buffer *nxt;
4363
4364 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4365 list_del(&nxt->list);
4366 kfree(nxt);
4367 if (++i == nbufs)
4368 return i;
Ye Bin2d447d32021-11-22 10:47:37 +08004369 cond_resched();
Jens Axboe067524e2020-03-02 16:32:28 -07004370 }
4371 i++;
4372 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004373 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07004374
4375 return i;
4376}
4377
Pavel Begunkov889fca72021-02-10 00:03:09 +00004378static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07004379{
4380 struct io_provide_buf *p = &req->pbuf;
4381 struct io_ring_ctx *ctx = req->ctx;
4382 struct io_buffer *head;
4383 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004384 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07004385
4386 io_ring_submit_lock(ctx, !force_nonblock);
4387
4388 lockdep_assert_held(&ctx->uring_lock);
4389
4390 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004391 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07004392 if (head)
4393 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07004394 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004395 req_set_fail(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004396
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004397 /* complete before unlock, IOPOLL may need the lock */
4398 __io_req_complete(req, issue_flags, ret, 0);
4399 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07004400 return 0;
4401}
4402
Jens Axboeddf0322d2020-02-23 16:41:33 -07004403static int io_provide_buffers_prep(struct io_kiocb *req,
4404 const struct io_uring_sqe *sqe)
4405{
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004406 unsigned long size, tmp_check;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004407 struct io_provide_buf *p = &req->pbuf;
4408 u64 tmp;
4409
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004410 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004411 return -EINVAL;
4412
4413 tmp = READ_ONCE(sqe->fd);
4414 if (!tmp || tmp > USHRT_MAX)
4415 return -E2BIG;
4416 p->nbufs = tmp;
4417 p->addr = READ_ONCE(sqe->addr);
4418 p->len = READ_ONCE(sqe->len);
4419
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004420 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4421 &size))
4422 return -EOVERFLOW;
4423 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4424 return -EOVERFLOW;
4425
Pavel Begunkovd81269f2021-03-19 10:21:19 +00004426 size = (unsigned long)p->len * p->nbufs;
4427 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07004428 return -EFAULT;
4429
4430 p->bgid = READ_ONCE(sqe->buf_group);
4431 tmp = READ_ONCE(sqe->off);
4432 if (tmp > USHRT_MAX)
4433 return -E2BIG;
4434 p->bid = tmp;
4435 return 0;
4436}
4437
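/*
 * Carve the user-provided region into pbuf->nbufs buffers of pbuf->len
 * bytes each, with sequentially increasing buffer IDs; the first buffer
 * doubles as the list head that the group ID is mapped to.
 */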
4438static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4439{
4440 struct io_buffer *buf;
4441 u64 addr = pbuf->addr;
4442 int i, bid = pbuf->bid;
4443
4444 for (i = 0; i < pbuf->nbufs; i++) {
Jens Axboe9990da92021-09-24 07:39:08 -06004445 buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004446 if (!buf)
4447 break;
4448
4449 buf->addr = addr;
Thadeu Lima de Souza Cascardod1f82802021-05-05 09:47:06 -03004450 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004451 buf->bid = bid;
4452 addr += pbuf->len;
4453 bid++;
4454 if (!*head) {
4455 INIT_LIST_HEAD(&buf->list);
4456 *head = buf;
4457 } else {
4458 list_add_tail(&buf->list, &(*head)->list);
4459 }
Eric Dumazetc718ea42022-02-14 20:10:03 -08004460 cond_resched();
Jens Axboeddf0322d2020-02-23 16:41:33 -07004461 }
4462
4463 return i ? i : -ENOMEM;
4464}
4465
Pavel Begunkov889fca72021-02-10 00:03:09 +00004466static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004467{
4468 struct io_provide_buf *p = &req->pbuf;
4469 struct io_ring_ctx *ctx = req->ctx;
4470 struct io_buffer *head, *list;
4471 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004472 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004473
4474 io_ring_submit_lock(ctx, !force_nonblock);
4475
4476 lockdep_assert_held(&ctx->uring_lock);
4477
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004478 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004479
4480 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004481 if (ret >= 0 && !list) {
Pavel Begunkovfa304062022-08-04 15:13:46 +01004482 ret = xa_insert(&ctx->io_buffers, p->bgid, head,
4483 GFP_KERNEL_ACCOUNT);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004484 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004485 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004486 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004487 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004488 req_set_fail(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004489 /* complete before unlock, IOPOLL may need the lock */
4490 __io_req_complete(req, issue_flags, ret, 0);
4491 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004492 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004493}
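
/*
 * For reference, a rough userspace sketch of filling a buffer group
 * with liburing's helper (illustrative values; error handling omitted):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// 8 buffers of 4KiB each, group ID 1, buffer IDs starting at 0
 *	io_uring_prep_provide_buffers(sqe, base, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 */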
4494
Jens Axboe3e4827b2020-01-08 15:18:09 -07004495static int io_epoll_ctl_prep(struct io_kiocb *req,
4496 const struct io_uring_sqe *sqe)
4497{
4498#if defined(CONFIG_EPOLL)
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004499 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004500 return -EINVAL;
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004501 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004502 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004503
4504 req->epoll.epfd = READ_ONCE(sqe->fd);
4505 req->epoll.op = READ_ONCE(sqe->len);
4506 req->epoll.fd = READ_ONCE(sqe->off);
4507
4508 if (ep_op_has_event(req->epoll.op)) {
4509 struct epoll_event __user *ev;
4510
4511 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4512 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4513 return -EFAULT;
4514 }
4515
4516 return 0;
4517#else
4518 return -EOPNOTSUPP;
4519#endif
4520}
4521
Pavel Begunkov889fca72021-02-10 00:03:09 +00004522static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004523{
4524#if defined(CONFIG_EPOLL)
4525 struct io_epoll *ie = &req->epoll;
4526 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004527 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004528
4529 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4530 if (force_nonblock && ret == -EAGAIN)
4531 return -EAGAIN;
4532
4533 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004534 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004535 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004536 return 0;
4537#else
4538 return -EOPNOTSUPP;
4539#endif
4540}
4541
Jens Axboec1ca7572019-12-25 22:18:28 -07004542static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4543{
4544#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004545 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
Jens Axboec1ca7572019-12-25 22:18:28 -07004546 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004547 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4548 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004549
4550 req->madvise.addr = READ_ONCE(sqe->addr);
4551 req->madvise.len = READ_ONCE(sqe->len);
4552 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4553 return 0;
4554#else
4555 return -EOPNOTSUPP;
4556#endif
4557}
4558
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004559static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004560{
4561#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4562 struct io_madvise *ma = &req->madvise;
4563 int ret;
4564
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004565 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004566 return -EAGAIN;
4567
Minchan Kim0726b012020-10-17 16:14:50 -07004568 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004569 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004570 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004571 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004572 return 0;
4573#else
4574 return -EOPNOTSUPP;
4575#endif
4576}
4577
Jens Axboe4840e412019-12-25 22:03:45 -07004578static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4579{
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004580 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
Jens Axboe4840e412019-12-25 22:03:45 -07004581 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004582 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4583 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004584
4585 req->fadvise.offset = READ_ONCE(sqe->off);
4586 req->fadvise.len = READ_ONCE(sqe->len);
4587 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4588 return 0;
4589}
4590
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004591static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004592{
4593 struct io_fadvise *fa = &req->fadvise;
4594 int ret;
4595
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004596 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004597 switch (fa->advice) {
4598 case POSIX_FADV_NORMAL:
4599 case POSIX_FADV_RANDOM:
4600 case POSIX_FADV_SEQUENTIAL:
4601 break;
4602 default:
4603 return -EAGAIN;
4604 }
4605 }
Jens Axboe4840e412019-12-25 22:03:45 -07004606
4607 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4608 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004609 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004610 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004611 return 0;
4612}
4613
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004614static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4615{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004616 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004617 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004618 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004619 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004620 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004621 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004622
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004623 req->statx.dfd = READ_ONCE(sqe->fd);
4624 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004625 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004626 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4627 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004628
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004629 return 0;
4630}
4631
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004632static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004633{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004634 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004635 int ret;
4636
Pavel Begunkov59d70012021-03-22 01:58:30 +00004637 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004638 return -EAGAIN;
4639
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004640 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4641 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004642
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004643 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004644 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004645 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004646 return 0;
4647}
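/*
 * Userspace sketch, assuming liburing's io_uring_prep_statx() helper and
 * reusing the ring setup from the fadvise sketch above; the path name is
 * purely illustrative. statx always runs from the async context, since
 * do_statx() may block:
 *
 *	struct statx stx;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_statx(sqe, AT_FDCWD, "some/file", 0, STATX_SIZE, &stx);
 *	io_uring_submit(&ring);
 */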
4648
Jens Axboeb5dba592019-12-11 14:02:38 -07004649static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4650{
Jens Axboe14587a462020-09-05 11:36:08 -06004651 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004652 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004653 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004654 sqe->rw_flags || sqe->buf_index)
Jens Axboeb5dba592019-12-11 14:02:38 -07004655 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004656 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004657 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004658
4659 req->close.fd = READ_ONCE(sqe->fd);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004660 req->close.file_slot = READ_ONCE(sqe->file_index);
4661 if (req->close.file_slot && req->close.fd)
4662 return -EINVAL;
4663
Jens Axboeb5dba592019-12-11 14:02:38 -07004664 return 0;
4665}
4666
Pavel Begunkov889fca72021-02-10 00:03:09 +00004667static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004668{
Jens Axboe9eac1902021-01-19 15:50:37 -07004669 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004670 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004671 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004672 struct file *file = NULL;
4673 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004674
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004675 if (req->close.file_slot) {
4676 ret = io_close_fixed(req, issue_flags);
4677 goto err;
4678 }
4679
Jens Axboe9eac1902021-01-19 15:50:37 -07004680 spin_lock(&files->file_lock);
4681 fdt = files_fdtable(files);
4682 if (close->fd >= fdt->max_fds) {
4683 spin_unlock(&files->file_lock);
4684 goto err;
4685 }
4686 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004687 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004688 spin_unlock(&files->file_lock);
4689 file = NULL;
4690 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004691 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004692
4693 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004694 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004695 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004696 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004697 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004698
Jens Axboe9eac1902021-01-19 15:50:37 -07004699 ret = __close_fd_get_file(close->fd, &file);
4700 spin_unlock(&files->file_lock);
4701 if (ret < 0) {
4702 if (ret == -ENOENT)
4703 ret = -EBADF;
4704 goto err;
4705 }
4706
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004707 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004708 ret = filp_close(file, current->files);
4709err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004710 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004711 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004712 if (file)
4713 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004714 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004715 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004716}
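/*
 * Userspace sketch (liburing assumed): a regular close passes the fd in
 * sqe->fd, while closing a fixed file uses sqe->file_index; the two are
 * mutually exclusive per io_close_prep() above. The direct variant assumes
 * a liburing new enough to provide the helper:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_close(sqe, fd);			// by descriptor
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_close_direct(sqe, slot);		// by fixed file slot
 */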
4717
Pavel Begunkov1155c762021-02-18 18:29:38 +00004718static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004719{
4720 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004721
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004722 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4723 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004724 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4725 sqe->splice_fd_in))
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004726 return -EINVAL;
4727
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004728 req->sync.off = READ_ONCE(sqe->off);
4729 req->sync.len = READ_ONCE(sqe->len);
4730 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004731 return 0;
4732}
4733
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004734static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004735{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004736 int ret;
4737
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004738 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004739 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004740 return -EAGAIN;
4741
Jens Axboe9adbd452019-12-20 08:45:55 -07004742 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004743 req->sync.flags);
4744 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004745 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004746 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004747 return 0;
4748}
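/*
 * Userspace sketch (liburing assumed): sync a byte range through the ring.
 * As the comment above notes, this opcode is always punted to the async
 * context rather than executed inline:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_sync_file_range(sqe, fd, 4096, 0,
 *				      SYNC_FILE_RANGE_WRITE);
 *	io_uring_submit(&ring);
 */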
4749
YueHaibing469956e2020-03-04 15:53:52 +08004750#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004751static int io_setup_async_msg(struct io_kiocb *req,
4752 struct io_async_msghdr *kmsg)
4753{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004754 struct io_async_msghdr *async_msg = req->async_data;
4755
4756 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004757 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004758 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004759 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004760 return -ENOMEM;
4761 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004762 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004763 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004764 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004765 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004766	/* if we're using fast_iov, set it to the new one */
4767 if (!async_msg->free_iov)
4768 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4769
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004770 return -EAGAIN;
4771}
4772
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004773static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4774 struct io_async_msghdr *iomsg)
4775{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004776 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004777 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004778 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004779 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004780}
4781
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004782static int io_sendmsg_prep_async(struct io_kiocb *req)
4783{
4784 int ret;
4785
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004786 ret = io_sendmsg_copy_hdr(req, req->async_data);
4787 if (!ret)
4788 req->flags |= REQ_F_NEED_CLEANUP;
4789 return ret;
4790}
4791
Jens Axboe3529d8c2019-12-19 18:24:38 -07004792static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004793{
Jens Axboee47293f2019-12-20 08:58:21 -07004794 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004795
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004796 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4797 return -EINVAL;
Jens Axboe50fefe52022-06-30 14:42:05 -06004798	if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
4799		return -EINVAL;
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004802
Pavel Begunkov270a5942020-07-12 20:41:04 +03004803 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004804 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004805 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4806 if (sr->msg_flags & MSG_DONTWAIT)
4807 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004808
Jens Axboed8768362020-02-27 14:17:49 -07004809#ifdef CONFIG_COMPAT
4810 if (req->ctx->compat)
4811 sr->msg_flags |= MSG_CMSG_COMPAT;
4812#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004813 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004814}
4815
Pavel Begunkov889fca72021-02-10 00:03:09 +00004816static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004817{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004818 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004819 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004820 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004821 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004822 int ret;
4823
Florent Revestdba4a922020-12-04 12:36:04 +01004824 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004825 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004826 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004827
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004828 kmsg = req->async_data;
4829 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004830 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004831 if (ret)
4832 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004833 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004834 }
4835
Pavel Begunkov04411802021-04-01 15:44:00 +01004836 flags = req->sr_msg.msg_flags;
4837 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004838 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004839 if (flags & MSG_WAITALL)
4840 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4841
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004842 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004843 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004844 return io_setup_async_msg(req, kmsg);
4845 if (ret == -ERESTARTSYS)
4846 ret = -EINTR;
4847
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004848 /* fast path, check for non-NULL to avoid function call */
4849 if (kmsg->free_iov)
4850 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004851 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004852 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004853 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004854 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004855 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004856}
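/*
 * Userspace sketch (liburing assumed) of the sendmsg path above. With
 * MSG_WAITALL set, a short send (cqe->res below the full iov length) marks
 * the request failed via the min_ret check, which also breaks any linked
 * SQE chain:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	struct msghdr msg = { 0 };
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, MSG_WAITALL);
 *	io_uring_submit(&ring);
 */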
4857
Pavel Begunkov889fca72021-02-10 00:03:09 +00004858static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004859{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004860 struct io_sr_msg *sr = &req->sr_msg;
4861 struct msghdr msg;
4862 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004863 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004864 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004865 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004866 int ret;
4867
Florent Revestdba4a922020-12-04 12:36:04 +01004868 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004869 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004870 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004871
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004872 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4873 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004874 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004875
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004876 msg.msg_name = NULL;
4877 msg.msg_control = NULL;
4878 msg.msg_controllen = 0;
4879 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004880
Pavel Begunkov04411802021-04-01 15:44:00 +01004881 flags = req->sr_msg.msg_flags;
4882 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004883 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004884 if (flags & MSG_WAITALL)
4885 min_ret = iov_iter_count(&msg.msg_iter);
4886
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004887 msg.msg_flags = flags;
4888 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004889 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004890 return -EAGAIN;
4891 if (ret == -ERESTARTSYS)
4892 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004893
Stefan Metzmacher00312752021-03-20 20:33:36 +01004894 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004895 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004896 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004897 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004898}
4899
Pavel Begunkov1400e692020-07-12 20:41:05 +03004900static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4901 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004902{
4903 struct io_sr_msg *sr = &req->sr_msg;
4904 struct iovec __user *uiov;
4905 size_t iov_len;
4906 int ret;
4907
Pavel Begunkov1400e692020-07-12 20:41:05 +03004908 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4909 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004910 if (ret)
4911 return ret;
4912
4913 if (req->flags & REQ_F_BUFFER_SELECT) {
4914 if (iov_len > 1)
4915 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004916 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004917 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004918 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004919 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004920 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004921 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004922 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004923 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004924 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004925 if (ret > 0)
4926 ret = 0;
4927 }
4928
4929 return ret;
4930}
4931
4932#ifdef CONFIG_COMPAT
4933static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004934 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004935{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004936 struct io_sr_msg *sr = &req->sr_msg;
4937 struct compat_iovec __user *uiov;
4938 compat_uptr_t ptr;
4939 compat_size_t len;
4940 int ret;
4941
Pavel Begunkov4af34172021-04-11 01:46:30 +01004942 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4943 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004944 if (ret)
4945 return ret;
4946
4947 uiov = compat_ptr(ptr);
4948 if (req->flags & REQ_F_BUFFER_SELECT) {
4949 compat_ssize_t clen;
4950
4951 if (len > 1)
4952 return -EINVAL;
4953 if (!access_ok(uiov, sizeof(*uiov)))
4954 return -EFAULT;
4955 if (__get_user(clen, &uiov->iov_len))
4956 return -EFAULT;
4957 if (clen < 0)
4958 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004959 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004960 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004961 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004962 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004963 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004964 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004965 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004966 if (ret < 0)
4967 return ret;
4968 }
4969
4970 return 0;
4971}
Jens Axboe03b12302019-12-02 18:50:25 -07004972#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004973
Pavel Begunkov1400e692020-07-12 20:41:05 +03004974static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4975 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004976{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004977 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004978
4979#ifdef CONFIG_COMPAT
4980 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004981 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004982#endif
4983
Pavel Begunkov1400e692020-07-12 20:41:05 +03004984 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004985}
4986
Jens Axboebcda7ba2020-02-23 16:42:51 -07004987static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004988 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004989{
4990 struct io_sr_msg *sr = &req->sr_msg;
4991 struct io_buffer *kbuf;
4992
Jens Axboebcda7ba2020-02-23 16:42:51 -07004993 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4994 if (IS_ERR(kbuf))
4995 return kbuf;
4996
4997 sr->kbuf = kbuf;
4998 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004999 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07005000}
5001
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005002static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
5003{
5004 return io_put_kbuf(req, req->sr_msg.kbuf);
5005}
5006
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005007static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07005008{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005009 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07005010
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005011 ret = io_recvmsg_copy_hdr(req, req->async_data);
5012 if (!ret)
5013 req->flags |= REQ_F_NEED_CLEANUP;
5014 return ret;
5015}
5016
5017static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5018{
5019 struct io_sr_msg *sr = &req->sr_msg;
5020
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03005021 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5022 return -EINVAL;
Jens Axboe50fefe52022-06-30 14:42:05 -06005023	if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
5024		return -EINVAL;
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03005027
Pavel Begunkov270a5942020-07-12 20:41:04 +03005028 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07005029 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005030 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01005031 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5032 if (sr->msg_flags & MSG_DONTWAIT)
5033 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07005034
Jens Axboed8768362020-02-27 14:17:49 -07005035#ifdef CONFIG_COMPAT
5036 if (req->ctx->compat)
5037 sr->msg_flags |= MSG_CMSG_COMPAT;
5038#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005039 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07005040}
5041
Pavel Begunkov889fca72021-02-10 00:03:09 +00005042static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07005043{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005044 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005045 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005046 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005047 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005048 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005049 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005050 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005051
Florent Revestdba4a922020-12-04 12:36:04 +01005052 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005053 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005054 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005055
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005056 kmsg = req->async_data;
5057 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005058 ret = io_recvmsg_copy_hdr(req, &iomsg);
5059 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03005060 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005061 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005062 }
5063
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005064 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005065 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005066 if (IS_ERR(kbuf))
5067 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005068 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00005069 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5070 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005071 1, req->sr_msg.len);
5072 }
5073
Pavel Begunkov04411802021-04-01 15:44:00 +01005074 flags = req->sr_msg.msg_flags;
5075 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005076 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005077 if (flags & MSG_WAITALL)
5078 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5079
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005080 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5081 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005082 if (force_nonblock && ret == -EAGAIN)
5083 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005084 if (ret == -ERESTARTSYS)
5085 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005086
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005087 if (req->flags & REQ_F_BUFFER_SELECTED)
5088 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005089 /* fast path, check for non-NULL to avoid function call */
5090 if (kmsg->free_iov)
5091 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005092 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005093 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005094 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005095 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06005096 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005097}
5098
Pavel Begunkov889fca72021-02-10 00:03:09 +00005099static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07005100{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005101 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005102 struct io_sr_msg *sr = &req->sr_msg;
5103 struct msghdr msg;
5104 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07005105 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005106 struct iovec iov;
5107 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005108 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005109 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005110 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005111
Florent Revestdba4a922020-12-04 12:36:04 +01005112 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005113 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005114 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005115
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005116 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005117 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005118 if (IS_ERR(kbuf))
5119 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005120 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07005121 }
5122
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005123 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005124 if (unlikely(ret))
5125 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07005126
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005127 msg.msg_name = NULL;
5128 msg.msg_control = NULL;
5129 msg.msg_controllen = 0;
5130 msg.msg_namelen = 0;
5131 msg.msg_iocb = NULL;
5132 msg.msg_flags = 0;
5133
Pavel Begunkov04411802021-04-01 15:44:00 +01005134 flags = req->sr_msg.msg_flags;
5135 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005136 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005137 if (flags & MSG_WAITALL)
5138 min_ret = iov_iter_count(&msg.msg_iter);
5139
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005140 ret = sock_recvmsg(sock, &msg, flags);
5141 if (force_nonblock && ret == -EAGAIN)
5142 return -EAGAIN;
5143 if (ret == -ERESTARTSYS)
5144 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005145out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005146 if (req->flags & REQ_F_BUFFER_SELECTED)
5147 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01005148 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005149 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005150 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07005151 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07005152}
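/*
 * Userspace sketch (liburing assumed) of REQ_F_BUFFER_SELECT as consumed
 * by io_recv()/io_recvmsg() above: provide a group of buffers up front and
 * let the kernel pick one at receive time; the chosen buffer id is
 * reported back in cqe->flags:
 *
 *	char bufs[8][512];
 *	unsigned int bid;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 512, 8, 7, 0);	// bgid 7
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 512, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 7;
 *	io_uring_submit(&ring);
 *
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */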
5153
Jens Axboe3529d8c2019-12-19 18:24:38 -07005154static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005155{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005156 struct io_accept *accept = &req->accept;
5157
Jens Axboe14587a462020-09-05 11:36:08 -06005158 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06005159 return -EINVAL;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005160 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005161 return -EINVAL;
5162
Jens Axboed55e5f52019-12-11 16:12:15 -07005163 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5164 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005165 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06005166 accept->nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005167
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005168 accept->file_slot = READ_ONCE(sqe->file_index);
Jens Axboe13239762022-03-14 17:26:19 -06005169 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005170 return -EINVAL;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005171 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5172 return -EINVAL;
5173 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5174 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005175 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005176}
Jens Axboe17f2fe32019-10-17 14:42:58 -06005177
Pavel Begunkov889fca72021-02-10 00:03:09 +00005178static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005179{
5180 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005181 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005182 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005183 bool fixed = !!accept->file_slot;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005184 struct file *file;
5185 int ret, fd;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005186
Jiufei Xuee697dee2020-06-10 13:41:59 +08005187 if (req->file->f_flags & O_NONBLOCK)
5188 req->flags |= REQ_F_NOWAIT;
5189
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005190 if (!fixed) {
5191 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5192 if (unlikely(fd < 0))
5193 return fd;
5194 }
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005195 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5196 accept->flags);
5197 if (IS_ERR(file)) {
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005198 if (!fixed)
5199 put_unused_fd(fd);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005200 ret = PTR_ERR(file);
5201 if (ret == -EAGAIN && force_nonblock)
5202 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005203 if (ret == -ERESTARTSYS)
5204 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005205 req_set_fail(req);
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005206 } else if (!fixed) {
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005207 fd_install(fd, file);
5208 ret = fd;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005209 } else {
5210 ret = io_install_fixed_file(req, file, issue_flags,
5211 accept->file_slot - 1);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005212 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00005213 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06005214 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005215}
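/*
 * Userspace sketch (liburing assumed): a plain accept returns the new fd
 * in cqe->res, while supplying sqe->file_index (accept->file_slot above)
 * installs the accepted file directly into the fixed file table. The
 * direct helper assumes liburing 2.1 or newer; note that SOCK_CLOEXEC is
 * rejected for the fixed-slot case by the prep handler above:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_accept(sqe, listen_fd, NULL, NULL, SOCK_CLOEXEC);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_accept_direct(sqe, listen_fd, NULL, NULL, 0, slot);
 */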
5216
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005217static int io_connect_prep_async(struct io_kiocb *req)
5218{
5219 struct io_async_connect *io = req->async_data;
5220 struct io_connect *conn = &req->connect;
5221
5222 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5223}
5224
Jens Axboe3529d8c2019-12-19 18:24:38 -07005225static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07005226{
Jens Axboe3529d8c2019-12-19 18:24:38 -07005227 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07005228
Jens Axboe14587a462020-09-05 11:36:08 -06005229 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005230 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005231 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5232 sqe->splice_fd_in)
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005233 return -EINVAL;
5234
Jens Axboe3529d8c2019-12-19 18:24:38 -07005235 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5236 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005237 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07005238}
5239
Pavel Begunkov889fca72021-02-10 00:03:09 +00005240static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07005241{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005242 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005243 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005244 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005245 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005246
Jens Axboee8c2bc12020-08-15 18:44:09 -07005247 if (req->async_data) {
5248 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07005249 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07005250 ret = move_addr_to_kernel(req->connect.addr,
5251 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07005252 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07005253 if (ret)
5254 goto out;
5255 io = &__io;
5256 }
5257
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005258 file_flags = force_nonblock ? O_NONBLOCK : 0;
5259
Jens Axboee8c2bc12020-08-15 18:44:09 -07005260 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005261 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07005262 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07005263 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07005264 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005265 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07005266 ret = -ENOMEM;
5267 goto out;
5268 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07005269 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07005270 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07005271 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07005272 if (ret == -ERESTARTSYS)
5273 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07005274out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005275 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005276 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005277 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005278 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005279}
YueHaibing469956e2020-03-04 15:53:52 +08005280#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07005281#define IO_NETOP_FN(op) \
5282static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5283{ \
5284 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07005285}
5286
Jens Axboe99a10082021-02-19 09:35:19 -07005287#define IO_NETOP_PREP(op) \
5288IO_NETOP_FN(op) \
5289static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5290{ \
5291 return -EOPNOTSUPP; \
5292} \
5293
5294#define IO_NETOP_PREP_ASYNC(op) \
5295IO_NETOP_PREP(op) \
5296static int io_##op##_prep_async(struct io_kiocb *req) \
5297{ \
5298 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08005299}
5300
Jens Axboe99a10082021-02-19 09:35:19 -07005301IO_NETOP_PREP_ASYNC(sendmsg);
5302IO_NETOP_PREP_ASYNC(recvmsg);
5303IO_NETOP_PREP_ASYNC(connect);
5304IO_NETOP_PREP(accept);
5305IO_NETOP_FN(send);
5306IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08005307#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06005308
Jens Axboed7718a92020-02-14 22:23:12 -07005309struct io_poll_table {
5310 struct poll_table_struct pt;
5311 struct io_kiocb *req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005312 int nr_entries;
Jens Axboed7718a92020-02-14 22:23:12 -07005313 int error;
5314};
5315
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005316#define IO_POLL_CANCEL_FLAG BIT(31)
Jens Axboec41e79a2022-08-29 14:30:21 +01005317#define IO_POLL_REF_MASK GENMASK(30, 0)
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005318
5319/*
5320 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
5321 * bump it and acquire ownership. It's disallowed to modify requests while not
5322 * owning it, that prevents from races for enqueueing task_work's and b/w
5323 * arming poll and wakeups.
5324 */
5325static inline bool io_poll_get_ownership(struct io_kiocb *req)
5326{
5327 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5328}
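/*
 * A standalone sketch of the ownership scheme above, using C11 atomics in
 * place of the kernel's atomic_t; names and helpers are illustrative, not
 * kernel API:
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	#define CANCEL_FLAG	(1u << 31)		// IO_POLL_CANCEL_FLAG
 *	#define REF_MASK	((1u << 31) - 1)	// IO_POLL_REF_MASK
 *
 *	static bool try_own(atomic_uint *refs)
 *	{
 *		// Whoever moves the refs part off zero becomes the owner;
 *		// concurrent wakeups only add references and rely on the
 *		// owner to notice them before letting go.
 *		return !(atomic_fetch_add(refs, 1) & REF_MASK);
 *	}
 *
 *	static bool release_and_recheck(atomic_uint *refs, unsigned int seen)
 *	{
 *		// Drop every reference observed at the start of the pass,
 *		// mirroring the atomic_sub_return() loop in
 *		// io_poll_check_events(); a true return means wakeups raced
 *		// in and the still-owner must run another pass.
 *		return atomic_fetch_sub(refs, seen) != seen;
 *	}
 */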
5329
5330static void io_poll_mark_cancelled(struct io_kiocb *req)
5331{
5332 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
5333}
5334
Pavel Begunkova85d7ac2022-08-29 14:30:15 +01005335static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
5336{
5337 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
5338 if (req->opcode == IORING_OP_POLL_ADD)
5339 return req->async_data;
5340 return req->apoll->double_poll;
5341}
5342
5343static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5344{
5345 if (req->opcode == IORING_OP_POLL_ADD)
5346 return &req->poll;
5347 return &req->apoll->poll;
5348}
5349
5350static void io_poll_req_insert(struct io_kiocb *req)
5351{
5352 struct io_ring_ctx *ctx = req->ctx;
5353 struct hlist_head *list;
5354
5355 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5356 hlist_add_head(&req->hash_node, list);
5357}
5358
5359static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5360 wait_queue_func_t wake_func)
5361{
5362 poll->head = NULL;
Pavel Begunkova85d7ac2022-08-29 14:30:15 +01005363#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5364 /* mask in events that we always want/need */
5365 poll->events = events | IO_POLL_UNMASK;
5366 INIT_LIST_HEAD(&poll->wait.entry);
5367 init_waitqueue_func_entry(&poll->wait, wake_func);
5368}
5369
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005370static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
Jens Axboed7718a92020-02-14 22:23:12 -07005371{
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005372 struct wait_queue_head *head = poll->head;
Jens Axboed7718a92020-02-14 22:23:12 -07005373
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005374 spin_lock_irq(&head->lock);
Jens Axboed7718a92020-02-14 22:23:12 -07005375 list_del_init(&poll->wait.entry);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005376 poll->head = NULL;
5377 spin_unlock_irq(&head->lock);
Jens Axboed7718a92020-02-14 22:23:12 -07005378}
5379
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005380static void io_poll_remove_entries(struct io_kiocb *req)
5381{
5382 struct io_poll_iocb *poll = io_poll_get_single(req);
5383 struct io_poll_iocb *poll_double = io_poll_get_double(req);
5384
5385 if (poll->head)
5386 io_poll_remove_entry(poll);
5387 if (poll_double && poll_double->head)
5388 io_poll_remove_entry(poll_double);
5389}
5390
5391/*
5392 * All poll tw should go through this. Checks for poll events, manages
5393 * references, does rewait, etc.
5394 *
5395 * Returns a negative error on failure. >0 when no action is required, which
5396 * means either a spurious wakeup or an already served multishot CQE. 0 when
5397 * it's done with the request, in which case the mask is stored in req->result.
5398 */
5399static int io_poll_check_events(struct io_kiocb *req)
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005400{
5401 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005402 struct io_poll_iocb *poll = io_poll_get_single(req);
5403 int v;
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005404
Jens Axboe316319e2021-08-19 09:41:42 -06005405 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkove09ee512021-07-01 13:26:05 +01005406 if (unlikely(req->task->flags & PF_EXITING))
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005407 io_poll_mark_cancelled(req);
Pavel Begunkove09ee512021-07-01 13:26:05 +01005408
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005409 do {
5410 v = atomic_read(&req->poll_refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005411
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005412 /* tw handler should be the owner, and so have some references */
5413 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
5414 return 0;
5415 if (v & IO_POLL_CANCEL_FLAG)
5416 return -ECANCELED;
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005417
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005418 if (!req->result) {
5419 struct poll_table_struct pt = { ._key = poll->events };
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005420
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005421 req->result = vfs_poll(req->file, &pt) & poll->events;
5422 }
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005423
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005424		/* multishot, just fill a CQE and proceed */
5425 if (req->result && !(poll->events & EPOLLONESHOT)) {
5426 __poll_t mask = mangle_poll(req->result & poll->events);
5427 bool filled;
Jens Axboe18bceab2020-05-15 11:56:54 -06005428
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005429 spin_lock(&ctx->completion_lock);
5430 filled = io_fill_cqe_aux(ctx, req->user_data, mask,
5431 IORING_CQE_F_MORE);
5432 io_commit_cqring(ctx);
5433 spin_unlock(&ctx->completion_lock);
5434 if (unlikely(!filled))
5435 return -ECANCELED;
5436 io_cqring_ev_posted(ctx);
5437 } else if (req->result) {
5438 return 0;
5439 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005440
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005441 /*
5442 * Release all references, retry if someone tried to restart
5443 * task_work while we were executing it.
5444 */
5445 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
Jens Axboe18bceab2020-05-15 11:56:54 -06005446
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005447 return 1;
Jens Axboe18bceab2020-05-15 11:56:54 -06005448}
5449
Pavel Begunkovf237c302021-08-18 12:42:46 +01005450static void io_poll_task_func(struct io_kiocb *req, bool *locked)
Jens Axboe18bceab2020-05-15 11:56:54 -06005451{
Jens Axboe6d816e02020-08-11 08:04:14 -06005452 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005453 int ret;
Jens Axboe18bceab2020-05-15 11:56:54 -06005454
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005455 ret = io_poll_check_events(req);
5456 if (ret > 0)
5457 return;
5458
5459 if (!ret) {
5460 req->result = mangle_poll(req->result & req->poll.events);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005461 } else {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005462 req->result = ret;
5463 req_set_fail(req);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005464 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005465
5466 io_poll_remove_entries(req);
5467 spin_lock(&ctx->completion_lock);
5468 hash_del(&req->hash_node);
5469 spin_unlock(&ctx->completion_lock);
5470 io_req_complete_post(req, req->result, 0);
Jens Axboe18bceab2020-05-15 11:56:54 -06005471}
5472
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005473static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
5474{
5475 struct io_ring_ctx *ctx = req->ctx;
5476 int ret;
5477
5478 ret = io_poll_check_events(req);
5479 if (ret > 0)
5480 return;
5481
5482 io_poll_remove_entries(req);
5483 spin_lock(&ctx->completion_lock);
5484 hash_del(&req->hash_node);
5485 spin_unlock(&ctx->completion_lock);
5486
5487 if (!ret)
5488 io_req_task_submit(req, locked);
5489 else
5490 io_req_complete_failed(req, ret);
5491}
5492
5493static void __io_poll_execute(struct io_kiocb *req, int mask)
5494{
5495 req->result = mask;
5496 if (req->opcode == IORING_OP_POLL_ADD)
5497 req->io_task_work.func = io_poll_task_func;
5498 else
5499 req->io_task_work.func = io_apoll_task_func;
5500
5501 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5502 io_req_task_work_add(req);
5503}
5504
5505static inline void io_poll_execute(struct io_kiocb *req, int res)
5506{
5507 if (io_poll_get_ownership(req))
5508 __io_poll_execute(req, res);
5509}
5510
5511static void io_poll_cancel_req(struct io_kiocb *req)
5512{
5513 io_poll_mark_cancelled(req);
5514 /* kick tw, which should complete the request */
5515 io_poll_execute(req, 0);
5516}
5517
5518static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5519 void *key)
Jens Axboe18bceab2020-05-15 11:56:54 -06005520{
5521 struct io_kiocb *req = wait->private;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005522 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
5523 wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005524 __poll_t mask = key_to_poll(key);
5525
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005526	/* for instances that support it, check for an event match first */
Jens Axboe18bceab2020-05-15 11:56:54 -06005527 if (mask && !(mask & poll->events))
5528 return 0;
5529
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005530 if (io_poll_get_ownership(req))
5531 __io_poll_execute(req, mask);
Jens Axboe18bceab2020-05-15 11:56:54 -06005532 return 1;
5533}
5534
Jens Axboe18bceab2020-05-15 11:56:54 -06005535static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005536 struct wait_queue_head *head,
5537 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005538{
5539 struct io_kiocb *req = pt->req;
5540
5541 /*
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005542 * The file being polled uses multiple waitqueues for poll handling
5543	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5544 * if this happens.
Jens Axboe18bceab2020-05-15 11:56:54 -06005545 */
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005546 if (unlikely(pt->nr_entries)) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005547 struct io_poll_iocb *first = poll;
Pavel Begunkov58852d42020-10-16 20:55:56 +01005548
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005549 /* double add on the same waitqueue head, ignore */
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005550 if (first->head == head)
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005551 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005552 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005553 if (*poll_ptr) {
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005554 if ((*poll_ptr)->head == head)
5555 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005556 pt->error = -EINVAL;
5557 return;
5558 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005559
Jens Axboe18bceab2020-05-15 11:56:54 -06005560 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5561 if (!poll) {
5562 pt->error = -ENOMEM;
5563 return;
5564 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005565 io_init_poll_iocb(poll, first->events, first->wait.func);
Jens Axboe807abcb2020-07-17 17:09:27 -06005566 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005567 }
5568
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005569 pt->nr_entries++;
Jens Axboe18bceab2020-05-15 11:56:54 -06005570 poll->head = head;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005571 poll->wait.private = req;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005572
5573 if (poll->events & EPOLLEXCLUSIVE)
5574 add_wait_queue_exclusive(head, &poll->wait);
5575 else
5576 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005577}
5578
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005579static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5580 struct poll_table_struct *p)
5581{
5582 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5583
5584 __io_queue_proc(&pt->req->poll, pt, head,
5585 (struct io_poll_iocb **) &pt->req->async_data);
5586}
5587
5588static int __io_arm_poll_handler(struct io_kiocb *req,
5589 struct io_poll_iocb *poll,
5590 struct io_poll_table *ipt, __poll_t mask)
5591{
5592 struct io_ring_ctx *ctx = req->ctx;
5593 int v;
5594
5595 INIT_HLIST_NODE(&req->hash_node);
5596 io_init_poll_iocb(poll, mask, io_poll_wake);
5597 poll->file = req->file;
5598 poll->wait.private = req;
5599
5600 ipt->pt._key = mask;
5601 ipt->req = req;
5602 ipt->error = 0;
5603 ipt->nr_entries = 0;
5604
5605 /*
5606	 * Take ownership to delay any tw execution until we're done with
5607	 * poll arming; see io_poll_get_ownership().
5608 */
5609 atomic_set(&req->poll_refs, 1);
5610 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5611
5612 if (mask && (poll->events & EPOLLONESHOT)) {
5613 io_poll_remove_entries(req);
5614 /* no one else has access to the req, forget about the ref */
5615 return mask;
5616 }
5617 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
5618 io_poll_remove_entries(req);
5619 if (!ipt->error)
5620 ipt->error = -EINVAL;
5621 return 0;
5622 }
5623
5624 spin_lock(&ctx->completion_lock);
5625 io_poll_req_insert(req);
5626 spin_unlock(&ctx->completion_lock);
5627
5628 if (mask) {
5629 /* can't multishot if failed, just queue the event we've got */
5630 if (unlikely(ipt->error || !ipt->nr_entries))
5631 poll->events |= EPOLLONESHOT;
5632 __io_poll_execute(req, mask);
5633 return 0;
5634 }
5635
5636 /*
5637 * Release ownership. If someone tried to queue a tw while it was
5638 * locked, kick it off for them.
5639 */
5640 v = atomic_dec_return(&req->poll_refs);
5641 if (unlikely(v & IO_POLL_REF_MASK))
5642 __io_poll_execute(req, 0);
5643 return 0;
5644}
5645
Jens Axboe18bceab2020-05-15 11:56:54 -06005646static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5647 struct poll_table_struct *p)
5648{
5649 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005650 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005651
Jens Axboe807abcb2020-07-17 17:09:27 -06005652 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005653}
5654
Olivier Langlois59b735a2021-06-22 05:17:39 -07005655enum {
5656 IO_APOLL_OK,
5657 IO_APOLL_ABORTED,
5658 IO_APOLL_READY
5659};
5660
5661static int io_arm_poll_handler(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005662{
5663 const struct io_op_def *def = &io_op_defs[req->opcode];
5664 struct io_ring_ctx *ctx = req->ctx;
5665 struct async_poll *apoll;
5666 struct io_poll_table ipt;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005667 __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
5668 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07005669
5670 if (!req->file || !file_can_poll(req->file))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005671 return IO_APOLL_ABORTED;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005672 if (req->flags & REQ_F_POLLED)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005673 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005674 if (!def->pollin && !def->pollout)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005675 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005676
5677 if (def->pollin) {
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005678 mask |= POLLIN | POLLRDNORM;
5679
5680 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5681 if ((req->opcode == IORING_OP_RECVMSG) &&
5682 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5683 mask &= ~POLLIN;
5684 } else {
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005685 mask |= POLLOUT | POLLWRNORM;
5686 }
5687
Jens Axboed7718a92020-02-14 22:23:12 -07005688 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5689 if (unlikely(!apoll))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005690 return IO_APOLL_ABORTED;
Jens Axboe807abcb2020-07-17 17:09:27 -06005691 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005692 req->apoll = apoll;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005693 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005694 ipt.pt._qproc = io_async_queue_proc;
5695
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005696 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
Hao Xu41a51692021-08-12 15:47:02 +08005697 if (ret || ipt.error)
5698 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5699
Olivier Langlois236daeae2021-05-31 02:36:37 -04005700 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5701 mask, apoll->poll.events);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005702 return IO_APOLL_OK;
Jens Axboed7718a92020-02-14 22:23:12 -07005703}
5704
Jens Axboe76e1b642020-09-26 15:05:03 -06005705/*
5706 * Returns true if we found and killed one or more poll requests
5707 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005708static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005709 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005710{
Jens Axboe78076bb2019-12-04 19:56:40 -07005711 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005712 struct io_kiocb *req;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005713 bool found = false;
5714 int i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005715
Jens Axboe79ebeae2021-08-10 15:18:27 -06005716 spin_lock(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005717 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5718 struct hlist_head *list;
5719
5720 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005721 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005722 if (io_match_task_safe(req, tsk, cancel_all)) {
Jens Axboe7524ec52022-08-29 14:30:20 +01005723 hlist_del_init(&req->hash_node);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005724 io_poll_cancel_req(req);
5725 found = true;
5726 }
Jens Axboef3606e32020-09-22 08:18:24 -06005727 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005728 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005729 spin_unlock(&ctx->completion_lock);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005730 return found;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005731}
5732
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005733static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5734 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005735 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005736{
Jens Axboe78076bb2019-12-04 19:56:40 -07005737 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005738 struct io_kiocb *req;
5739
Jens Axboe78076bb2019-12-04 19:56:40 -07005740 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5741 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005742 if (sqe_addr != req->user_data)
5743 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005744 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5745 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005746 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005747 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005748 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07005749}
5750
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005751static bool io_poll_disarm(struct io_kiocb *req)
5752 __must_hold(&ctx->completion_lock)
5753{
5754 if (!io_poll_get_ownership(req))
5755 return false;
5756 io_poll_remove_entries(req);
5757 hash_del(&req->hash_node);
5758 return true;
5759}
5760
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005761static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5762 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005763 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005764{
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005765 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005766
Jens Axboeb2cb8052021-03-17 08:17:19 -06005767 if (!req)
5768 return -ENOENT;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005769 io_poll_cancel_req(req);
5770 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005771}
5772
Pavel Begunkov9096af32021-04-14 13:38:36 +01005773static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5774 unsigned int flags)
5775{
5776 u32 events;
5777
5778 events = READ_ONCE(sqe->poll32_events);
5779#ifdef __BIG_ENDIAN
5780 events = swahw32(events);
5781#endif
5782 if (!(flags & IORING_POLL_ADD_MULTI))
5783 events |= EPOLLONESHOT;
5784 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5785}
5786
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005787static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005788 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005789{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005790 struct io_poll_update *upd = &req->poll_update;
5791 u32 flags;
5792
Jens Axboe221c5eb2019-01-17 09:41:58 -07005793 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5794 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005795 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005796 return -EINVAL;
5797 flags = READ_ONCE(sqe->len);
5798 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5799 IORING_POLL_ADD_MULTI))
5800 return -EINVAL;
5801 /* meaningless without update */
5802 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005803 return -EINVAL;
5804
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005805 upd->old_user_data = READ_ONCE(sqe->addr);
5806 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5807 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005808
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005809 upd->new_user_data = READ_ONCE(sqe->off);
5810 if (!upd->update_user_data && upd->new_user_data)
5811 return -EINVAL;
5812 if (upd->update_events)
5813 upd->events = io_poll_parse_events(sqe, flags);
5814 else if (sqe->poll32_events)
5815 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005816
Jens Axboe221c5eb2019-01-17 09:41:58 -07005817 return 0;
5818}
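
/*
 * Userspace sketch (illustrative, assuming direct SQE access):
 * updating a pending poll request, matching the field layout parsed
 * above:
 *
 *	sqe->opcode        = IORING_OP_POLL_REMOVE;
 *	sqe->addr          = old_user_data;
 *	sqe->off           = new_user_data;	// needs UPDATE_USER_DATA
 *	sqe->len           = IORING_POLL_UPDATE_EVENTS |
 *			     IORING_POLL_UPDATE_USER_DATA;
 *	sqe->poll32_events = POLLOUT;	// consumed only with UPDATE_EVENTS
 */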
5819
Jens Axboe3529d8c2019-12-19 18:24:38 -07005820static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005821{
5822 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005823 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005824
5825 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5826 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005827 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005828 return -EINVAL;
5829 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005830 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005831 return -EINVAL;
5832
Pavel Begunkov48dcd382021-08-15 10:40:18 +01005833 io_req_set_refcount(req);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005834 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005835 return 0;
5836}
5837
Pavel Begunkov61e98202021-02-10 00:03:08 +00005838static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005839{
5840 struct io_poll_iocb *poll = &req->poll;
Jens Axboe0969e782019-12-17 18:40:57 -07005841 struct io_poll_table ipt;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005842 int ret;
Jens Axboe0969e782019-12-17 18:40:57 -07005843
Jens Axboed7718a92020-02-14 22:23:12 -07005844 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005845
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005846 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
Pavel Begunkov6c7259c2022-08-29 14:30:22 +01005847 if (!ret && ipt.error)
5848 req_set_fail(req);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005849 ret = ret ?: ipt.error;
5850 if (ret)
5851 __io_req_complete(req, issue_flags, ret, 0);
5852 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005853}
5854
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005855static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06005856{
5857 struct io_ring_ctx *ctx = req->ctx;
5858 struct io_kiocb *preq;
Pavel Begunkov040e58f2022-08-29 14:30:14 +01005859 int ret2, ret = 0;
Jens Axboeb69de282021-03-17 08:37:41 -06005860
Jens Axboe79ebeae2021-08-10 15:18:27 -06005861 spin_lock(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005862 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005863 if (!preq || !io_poll_disarm(preq)) {
Pavel Begunkov040e58f2022-08-29 14:30:14 +01005864 spin_unlock(&ctx->completion_lock);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005865 ret = preq ? -EALREADY : -ENOENT;
Pavel Begunkov040e58f2022-08-29 14:30:14 +01005866 goto out;
Jens Axboeb69de282021-03-17 08:37:41 -06005867 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005868 spin_unlock(&ctx->completion_lock);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005869
Pavel Begunkov040e58f2022-08-29 14:30:14 +01005870 if (req->poll_update.update_events || req->poll_update.update_user_data) {
5871		/* only mask the event flags, keep behavior flags */
5872 if (req->poll_update.update_events) {
5873 preq->poll.events &= ~0xffff;
5874 preq->poll.events |= req->poll_update.events & 0xffff;
5875 preq->poll.events |= IO_POLL_UNMASK;
5876 }
5877 if (req->poll_update.update_user_data)
5878 preq->user_data = req->poll_update.new_user_data;
5879
5880 ret2 = io_poll_add(preq, issue_flags);
5881 /* successfully updated, don't complete poll request */
5882 if (!ret2)
5883 goto out;
5884 }
5885 req_set_fail(preq);
5886 io_req_complete(preq, -ECANCELED);
5887out:
5888 if (ret < 0)
5889 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06005890 /* complete update request, we're done with it */
5891 io_req_complete(req, ret);
Jens Axboeb69de282021-03-17 08:37:41 -06005892 return 0;
5893}
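
/*
 * On a successful update the disarmed target is re-armed through
 * io_poll_add() and keeps waiting; otherwise it completes with
 * -ECANCELED. The update request itself always completes here.
 */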
5894
Pavel Begunkovf237c302021-08-18 12:42:46 +01005895static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89850fc2021-08-10 15:11:51 -06005896{
Jens Axboe89850fc2021-08-10 15:11:51 -06005897 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01005898 io_req_complete_post(req, -ETIME, 0);
Jens Axboe89850fc2021-08-10 15:11:51 -06005899}
5900
Jens Axboe5262f562019-09-17 12:26:57 -06005901static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5902{
Jens Axboead8a48a2019-11-15 08:49:11 -07005903 struct io_timeout_data *data = container_of(timer,
5904 struct io_timeout_data, timer);
5905 struct io_kiocb *req = data->req;
5906 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005907 unsigned long flags;
5908
Jens Axboe89850fc2021-08-10 15:11:51 -06005909 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005910 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005911 atomic_set(&req->ctx->cq_timeouts,
5912 atomic_read(&req->ctx->cq_timeouts) + 1);
Jens Axboe89850fc2021-08-10 15:11:51 -06005913 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005914
Jens Axboe89850fc2021-08-10 15:11:51 -06005915 req->io_task_work.func = io_req_task_timeout;
5916 io_req_task_work_add(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005917 return HRTIMER_NORESTART;
5918}
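
/*
 * The hrtimer callback above runs in irq context, so it only detaches
 * the timeout under ->timeout_lock and defers completion to task
 * context; io_req_task_timeout() then posts the -ETIME CQE.
 */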
5919
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005920static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5921 __u64 user_data)
Jens Axboe89850fc2021-08-10 15:11:51 -06005922 __must_hold(&ctx->timeout_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005923{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005924 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005925 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005926 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06005927
5928 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005929 found = user_data == req->user_data;
5930 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06005931 break;
Jens Axboef254ac02020-08-12 17:33:30 -06005932 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005933 if (!found)
5934 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06005935
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005936 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005937 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005938 return ERR_PTR(-EALREADY);
5939 list_del_init(&req->timeout.list);
5940 return req;
5941}
5942
5943static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01005944 __must_hold(&ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06005945 __must_hold(&ctx->timeout_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005946{
5947 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5948
5949 if (IS_ERR(req))
5950 return PTR_ERR(req);
5951
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005952 req_set_fail(req);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01005953 io_fill_cqe_req(req, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01005954 io_put_req_deferred(req);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005955 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005956}
5957
Jens Axboe50c1df22021-08-27 17:11:06 -06005958static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
5959{
5960 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
5961 case IORING_TIMEOUT_BOOTTIME:
5962 return CLOCK_BOOTTIME;
5963 case IORING_TIMEOUT_REALTIME:
5964 return CLOCK_REALTIME;
5965 default:
5966 /* can't happen, vetted at prep time */
5967 WARN_ON_ONCE(1);
5968 fallthrough;
5969 case 0:
5970 return CLOCK_MONOTONIC;
5971 }
5972}
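
/*
 * Userspace sketch (illustrative): selecting the clock for a timeout;
 * at most one clock flag may be set, which prep verifies via
 * hweight32():
 *
 *	sqe->timeout_flags = IORING_TIMEOUT_BOOTTIME;	// or _REALTIME
 *
 * With no clock flag set, CLOCK_MONOTONIC is used.
 */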
5973
Pavel Begunkovf1042b62021-08-28 19:54:39 -06005974static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5975 struct timespec64 *ts, enum hrtimer_mode mode)
5976 __must_hold(&ctx->timeout_lock)
5977{
5978 struct io_timeout_data *io;
5979 struct io_kiocb *req;
5980 bool found = false;
5981
5982 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
5983 found = user_data == req->user_data;
5984 if (found)
5985 break;
5986 }
5987 if (!found)
5988 return -ENOENT;
5989
5990 io = req->async_data;
5991 if (hrtimer_try_to_cancel(&io->timer) == -1)
5992 return -EALREADY;
5993 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
5994 io->timer.function = io_link_timeout_fn;
5995 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
5996 return 0;
5997}
5998
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005999static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6000 struct timespec64 *ts, enum hrtimer_mode mode)
Jens Axboe89850fc2021-08-10 15:11:51 -06006001 __must_hold(&ctx->timeout_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006002{
6003 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6004 struct io_timeout_data *data;
6005
6006 if (IS_ERR(req))
6007 return PTR_ERR(req);
6008
6009 req->timeout.off = 0; /* noseq */
6010 data = req->async_data;
6011 list_add_tail(&req->timeout.list, &ctx->timeout_list);
Jens Axboe50c1df22021-08-27 17:11:06 -06006012 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006013 data->timer.function = io_timeout_fn;
6014 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6015 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07006016}
6017
Jens Axboe3529d8c2019-12-19 18:24:38 -07006018static int io_timeout_remove_prep(struct io_kiocb *req,
6019 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07006020{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006021 struct io_timeout_rem *tr = &req->timeout_rem;
6022
Jens Axboeb29472e2019-12-17 18:50:29 -07006023 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6024 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006025 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6026 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006027 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
Jens Axboeb29472e2019-12-17 18:50:29 -07006028 return -EINVAL;
6029
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006030 tr->ltimeout = false;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006031 tr->addr = READ_ONCE(sqe->addr);
6032 tr->flags = READ_ONCE(sqe->timeout_flags);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006033 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6034 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6035 return -EINVAL;
6036 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6037 tr->ltimeout = true;
6038 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006039 return -EINVAL;
6040 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6041 return -EFAULT;
6042 } else if (tr->flags) {
6043 /* timeout removal doesn't support flags */
6044 return -EINVAL;
6045 }
6046
Jens Axboeb29472e2019-12-17 18:50:29 -07006047 return 0;
6048}
6049
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006050static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6051{
6052 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6053 : HRTIMER_MODE_REL;
6054}
6055
Jens Axboe11365042019-10-16 09:08:32 -06006056/*
6057 * Remove or update an existing timeout command
6058 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00006059static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06006060{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006061 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06006062 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006063 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06006064
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006065 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6066 spin_lock(&ctx->completion_lock);
6067 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006068 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006069 spin_unlock_irq(&ctx->timeout_lock);
6070 spin_unlock(&ctx->completion_lock);
6071 } else {
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006072 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6073
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006074 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006075 if (tr->ltimeout)
6076 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6077 else
6078 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006079 spin_unlock_irq(&ctx->timeout_lock);
6080 }
Jens Axboe11365042019-10-16 09:08:32 -06006081
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006082 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006083 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006084 io_req_complete_post(req, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06006085 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06006086}
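
/*
 * Lock order above is ->completion_lock, then ->timeout_lock: the
 * cancel path posts a CQE for the removed timeout and so needs both,
 * while updates only touch the timer and lists and get away with
 * ->timeout_lock alone.
 */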
6087
Jens Axboe3529d8c2019-12-19 18:24:38 -07006088static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07006089 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06006090{
Jens Axboead8a48a2019-11-15 08:49:11 -07006091 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06006092 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006093 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06006094
Jens Axboead8a48a2019-11-15 08:49:11 -07006095 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06006096 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006097 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6098 sqe->splice_fd_in)
Jens Axboea41525a2019-10-15 16:48:15 -06006099 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006100 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07006101 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06006102 flags = READ_ONCE(sqe->timeout_flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006103 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
6104 return -EINVAL;
6105 /* more than one clock specified is invalid, obviously */
6106 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
Jens Axboe5262f562019-09-17 12:26:57 -06006107 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06006108
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006109 INIT_LIST_HEAD(&req->timeout.list);
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006110 req->timeout.off = off;
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01006111 if (unlikely(off && !req->ctx->off_timeout_used))
6112 req->ctx->off_timeout_used = true;
Jens Axboe26a61672019-12-20 09:02:01 -07006113
Jens Axboee8c2bc12020-08-15 18:44:09 -07006114 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07006115 return -ENOMEM;
6116
Jens Axboee8c2bc12020-08-15 18:44:09 -07006117 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006118 data->req = req;
Jens Axboe50c1df22021-08-27 17:11:06 -06006119 data->flags = flags;
Jens Axboead8a48a2019-11-15 08:49:11 -07006120
6121 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06006122 return -EFAULT;
6123
Jens Axboeba7261a2022-04-08 11:08:58 -06006124 INIT_LIST_HEAD(&req->timeout.list);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006125 data->mode = io_translate_timeout_mode(flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006126 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006127
6128 if (is_timeout_link) {
6129 struct io_submit_link *link = &req->ctx->submit_state.link;
6130
6131 if (!link->head)
6132 return -EINVAL;
6133 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6134 return -EINVAL;
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01006135 req->timeout.head = link->last;
6136 link->last->flags |= REQ_F_ARM_LTIMEOUT;
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006137 }
Jens Axboead8a48a2019-11-15 08:49:11 -07006138 return 0;
6139}
6140
Pavel Begunkov61e98202021-02-10 00:03:08 +00006141static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07006142{
Jens Axboead8a48a2019-11-15 08:49:11 -07006143 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006144 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006145 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006146 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07006147
Jens Axboe89850fc2021-08-10 15:11:51 -06006148 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07006149
Jens Axboe5262f562019-09-17 12:26:57 -06006150 /*
6151	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07006152 * timeout event to be satisfied. If it isn't set, then this is
6153 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06006154 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006155 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07006156 entry = ctx->timeout_list.prev;
6157 goto add;
6158 }
Jens Axboe5262f562019-09-17 12:26:57 -06006159
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006160 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6161 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06006162
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05006163	/* Update the last seq here in case io_flush_timeouts() hasn't.
6164	 * This is safe because ->timeout_lock is held, and submissions
6165	 * and completions are never mixed in the same ->timeout_lock section.
6166	 */
6167 ctx->cq_last_tm_flush = tail;
6168
Jens Axboe5262f562019-09-17 12:26:57 -06006169 /*
6170 * Insertion sort, ensuring the first entry in the list is always
6171 * the one we need first.
6172 */
Jens Axboe5262f562019-09-17 12:26:57 -06006173 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006174 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6175 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06006176
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006177 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07006178 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006179 /* nxt.seq is behind @tail, otherwise would've been completed */
6180 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06006181 break;
6182 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07006183add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006184 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07006185 data->timer.function = io_timeout_fn;
6186 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe89850fc2021-08-10 15:11:51 -06006187 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06006188 return 0;
6189}
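
/*
 * Worked example of the sequence math: with cached_cq_tail == 100,
 * cq_timeouts == 10 and sqe->off == 5, tail is 90 and target_seq is
 * 95, i.e. the timeout fires after five more non-timeout completions
 * unless the timer expires first.
 */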
6190
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006191struct io_cancel_data {
6192 struct io_ring_ctx *ctx;
6193 u64 user_data;
6194};
6195
Jens Axboe62755e32019-10-28 21:49:21 -06006196static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06006197{
Jens Axboe62755e32019-10-28 21:49:21 -06006198 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006199 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06006200
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006201 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06006202}
6203
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006204static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
6205 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06006206{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006207 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06006208 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06006209 int ret = 0;
6210
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006211 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07006212 return -ENOENT;
6213
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006214 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06006215 switch (cancel_ret) {
6216 case IO_WQ_CANCEL_OK:
6217 ret = 0;
6218 break;
6219 case IO_WQ_CANCEL_RUNNING:
6220 ret = -EALREADY;
6221 break;
6222 case IO_WQ_CANCEL_NOTFOUND:
6223 ret = -ENOENT;
6224 break;
6225 }
6226
Jens Axboee977d6d2019-11-05 12:39:45 -07006227 return ret;
6228}
6229
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006230static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
Jens Axboe47f46762019-11-09 17:43:02 -07006231{
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006232 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006233 int ret;
6234
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006235 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006236
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006237 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01006238 if (ret != -ENOENT)
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006239 return ret;
Pavel Begunkov505657b2021-08-17 20:28:09 +01006240
6241 spin_lock(&ctx->completion_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006242 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006243 ret = io_timeout_cancel(ctx, sqe_addr);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006244 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006245 if (ret != -ENOENT)
Pavel Begunkov505657b2021-08-17 20:28:09 +01006246 goto out;
6247 ret = io_poll_cancel(ctx, sqe_addr, false);
6248out:
6249 spin_unlock(&ctx->completion_lock);
6250 return ret;
Jens Axboe47f46762019-11-09 17:43:02 -07006251}
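
/*
 * Cancellation cascade: the io-wq queues are tried first, then pending
 * timeouts, then poll requests; the first result other than -ENOENT
 * wins, with -EALREADY meaning the target was found but already
 * running.
 */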
6252
Jens Axboe3529d8c2019-12-19 18:24:38 -07006253static int io_async_cancel_prep(struct io_kiocb *req,
6254 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07006255{
Jens Axboefbf23842019-12-17 18:45:56 -07006256 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07006257 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006258 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6259 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006260 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6261 sqe->splice_fd_in)
Jens Axboee977d6d2019-11-05 12:39:45 -07006262 return -EINVAL;
6263
Jens Axboefbf23842019-12-17 18:45:56 -07006264 req->cancel.addr = READ_ONCE(sqe->addr);
6265 return 0;
6266}
6267
Pavel Begunkov61e98202021-02-10 00:03:08 +00006268static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07006269{
6270 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006271 u64 sqe_addr = req->cancel.addr;
6272 struct io_tctx_node *node;
6273 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07006274
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006275 ret = io_try_cancel_userdata(req, sqe_addr);
Pavel Begunkov58f99372021-03-12 16:25:55 +00006276 if (ret != -ENOENT)
6277 goto done;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006278
6279 /* slow path, try all io-wq's */
6280 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
6281 ret = -ENOENT;
6282 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6283 struct io_uring_task *tctx = node->task->io_uring;
6284
Pavel Begunkov58f99372021-03-12 16:25:55 +00006285 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6286 if (ret != -ENOENT)
6287 break;
6288 }
6289 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkov58f99372021-03-12 16:25:55 +00006290done:
Pavel Begunkov58f99372021-03-12 16:25:55 +00006291 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006292 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006293 io_req_complete_post(req, ret, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06006294 return 0;
6295}
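
/*
 * Userspace sketch (illustrative): cancelling an earlier submission by
 * its user_data, which prep reads from sqe->addr:
 *
 *	sqe->opcode = IORING_OP_ASYNC_CANCEL;
 *	sqe->addr   = target_user_data;
 *
 * The CQE res is 0 on success, -ENOENT if nothing matched, and
 * -EALREADY if the target was already executing; the slow path above
 * retries against every io-wq in ->tctx_list before giving up.
 */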
6296
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006297static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07006298 const struct io_uring_sqe *sqe)
6299{
Daniele Albano61710e42020-07-18 14:15:16 -06006300 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6301 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006302 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006303 return -EINVAL;
6304
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006305 req->rsrc_update.offset = READ_ONCE(sqe->off);
6306 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6307 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006308 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006309 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006310 return 0;
6311}
6312
Pavel Begunkov889fca72021-02-10 00:03:09 +00006313static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006314{
6315 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006316 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006317 int ret;
6318
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006319 up.offset = req->rsrc_update.offset;
6320 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006321 up.nr = 0;
6322 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01006323 up.resv = 0;
Dylan Yudaken7a7c9f92022-04-12 09:30:40 -07006324 up.resv2 = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006325
Jens Axboecdb31c22021-09-24 08:43:54 -06006326 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkovfdecb662021-04-25 14:32:20 +01006327 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01006328 &up, req->rsrc_update.nr_args);
Jens Axboecdb31c22021-09-24 08:43:54 -06006329 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Jens Axboe05f3fb32019-12-09 11:22:50 -07006330
6331 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006332 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00006333 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006334 return 0;
6335}
6336
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006337static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07006338{
Jens Axboed625c6e2019-12-17 19:53:05 -07006339 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07006340 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006341 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07006342 case IORING_OP_READV:
6343 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006344 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006345 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006346 case IORING_OP_WRITEV:
6347 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006348 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006349 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006350 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006351 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006352 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006353 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006354 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006355 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006356 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006357 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006358 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006359 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006360 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006361 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006362 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006363 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07006364 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006365 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006366 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006367 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07006368 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006369 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07006370 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006371 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006372 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006373 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006374 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006375 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07006376 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006377 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006378 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006379 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07006380 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006381 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006382 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006383 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006384 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006385 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07006386 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006387 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07006388 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006389 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07006390 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006391 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006392 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006393 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006394 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006395 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006396 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006397 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07006398 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006399 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006400 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006401 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006402 case IORING_OP_SHUTDOWN:
6403 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06006404 case IORING_OP_RENAMEAT:
6405 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06006406 case IORING_OP_UNLINKAT:
6407 return io_unlinkat_prep(req, sqe);
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006408 case IORING_OP_MKDIRAT:
6409 return io_mkdirat_prep(req, sqe);
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006410 case IORING_OP_SYMLINKAT:
6411 return io_symlinkat_prep(req, sqe);
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006412 case IORING_OP_LINKAT:
6413 return io_linkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006414 }
6415
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006416 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6417 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01006418 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006419}
6420
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006421static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006422{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006423 if (!io_op_defs[req->opcode].needs_async_setup)
6424 return 0;
6425 if (WARN_ON_ONCE(req->async_data))
6426 return -EFAULT;
6427 if (io_alloc_async_data(req))
6428 return -EAGAIN;
6429
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006430 switch (req->opcode) {
6431 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006432 return io_rw_prep_async(req, READ);
6433 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006434 return io_rw_prep_async(req, WRITE);
6435 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006436 return io_sendmsg_prep_async(req);
6437 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006438 return io_recvmsg_prep_async(req);
6439 case IORING_OP_CONNECT:
6440 return io_connect_prep_async(req);
6441 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006442 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6443 req->opcode);
6444 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07006445}
6446
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006447static u32 io_get_sequence(struct io_kiocb *req)
6448{
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006449 u32 seq = req->ctx->cached_sq_head;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006450
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006451	/* need the original cached_sq_head; it was advanced once per linked req */
6452 io_for_each_link(req, req)
6453 seq--;
6454 return seq;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006455}
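
/*
 * Example: after a three-request link is submitted, cached_sq_head has
 * advanced by three; walking the link subtracts one per request, so
 * the sequence returned for the head request is the pre-link head.
 */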
6456
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006457static bool io_drain_req(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006458{
Pavel Begunkov3c199662021-06-15 16:47:57 +01006459 struct io_kiocb *pos;
Jens Axboedef596e2019-01-09 08:59:42 -07006460 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006461 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07006462 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006463 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07006464
Pavel Begunkovb8ce1b92021-08-31 14:13:11 +01006465 if (req->flags & REQ_F_FAIL) {
6466 io_req_complete_fail_submit(req);
6467 return true;
6468 }
6469
Pavel Begunkov3c199662021-06-15 16:47:57 +01006470 /*
6471 * If we need to drain a request in the middle of a link, drain the
6472 * head request and the next request/link after the current link.
6473 * Considering sequential execution of links, IOSQE_IO_DRAIN will be
6474 * maintained for every request of our link.
6475 */
6476 if (ctx->drain_next) {
6477 req->flags |= REQ_F_IO_DRAIN;
6478 ctx->drain_next = false;
6479 }
6480 /* not interested in head, start from the first linked */
6481 io_for_each_link(pos, req->link) {
6482 if (pos->flags & REQ_F_IO_DRAIN) {
6483 ctx->drain_next = true;
6484 req->flags |= REQ_F_IO_DRAIN;
6485 break;
6486 }
6487 }
6488
Jens Axboedef596e2019-01-09 08:59:42 -07006489	/* Still need to defer if there are pending reqs in the defer list. */
Hao Xu1bd12b72021-11-25 17:21:02 +08006490 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006491 if (likely(list_empty_careful(&ctx->defer_list) &&
Pavel Begunkov10c66902021-06-15 16:47:56 +01006492 !(req->flags & REQ_F_IO_DRAIN))) {
Hao Xu1bd12b72021-11-25 17:21:02 +08006493 spin_unlock(&ctx->completion_lock);
Pavel Begunkov10c66902021-06-15 16:47:56 +01006494 ctx->drain_active = false;
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006495 return false;
Pavel Begunkov10c66902021-06-15 16:47:56 +01006496 }
Hao Xu1bd12b72021-11-25 17:21:02 +08006497 spin_unlock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006498
6499 seq = io_get_sequence(req);
6500 /* Still a chance to pass the sequence check */
6501 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006502 return false;
Jens Axboedef596e2019-01-09 08:59:42 -07006503
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006504 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006505 if (ret)
Pavel Begunkov1b487732021-07-11 22:41:13 +01006506 goto fail;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006507 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006508 de = kmalloc(sizeof(*de), GFP_KERNEL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006509 if (!de) {
Pavel Begunkov1b487732021-07-11 22:41:13 +01006510 ret = -ENOMEM;
6511fail:
6512 io_req_complete_failed(req, ret);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006513 return true;
6514 }
Jens Axboe31b51512019-01-18 22:56:34 -07006515
Jens Axboe79ebeae2021-08-10 15:18:27 -06006516 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006517 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06006518 spin_unlock(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006519 kfree(de);
Pavel Begunkovf237c302021-08-18 12:42:46 +01006520 io_queue_async_work(req, NULL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006521 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006522 }
6523
6524 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006525 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006526 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006527 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006528 spin_unlock(&ctx->completion_lock);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006529 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006530}
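
/*
 * Drain summary: the request is async-prepped and parked on
 * ->defer_list, and is only released for execution once the
 * req_need_defer() check shows all earlier submissions have completed.
 */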
6531
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006532static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006533{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006534 if (req->flags & REQ_F_BUFFER_SELECTED) {
6535 switch (req->opcode) {
6536 case IORING_OP_READV:
6537 case IORING_OP_READ_FIXED:
6538 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006539 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006540 break;
6541 case IORING_OP_RECVMSG:
6542 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006543 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006544 break;
6545 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006546 }
6547
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006548 if (req->flags & REQ_F_NEED_CLEANUP) {
6549 switch (req->opcode) {
6550 case IORING_OP_READV:
6551 case IORING_OP_READ_FIXED:
6552 case IORING_OP_READ:
6553 case IORING_OP_WRITEV:
6554 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006555 case IORING_OP_WRITE: {
6556 struct io_async_rw *io = req->async_data;
Pavel Begunkov1dacb4d2021-06-17 18:14:03 +01006557
6558 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006559 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006560 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006561 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006562 case IORING_OP_SENDMSG: {
6563 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006564
6565 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006566 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006567 }
Jens Axboef3cd48502020-09-24 14:55:54 -06006568 case IORING_OP_OPENAT:
6569 case IORING_OP_OPENAT2:
6570 if (req->open.filename)
6571 putname(req->open.filename);
6572 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006573 case IORING_OP_RENAMEAT:
6574 putname(req->rename.oldpath);
6575 putname(req->rename.newpath);
6576 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006577 case IORING_OP_UNLINKAT:
6578 putname(req->unlink.filename);
6579 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006580 case IORING_OP_MKDIRAT:
6581 putname(req->mkdir.filename);
6582 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006583 case IORING_OP_SYMLINKAT:
6584 putname(req->symlink.oldpath);
6585 putname(req->symlink.newpath);
6586 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006587 case IORING_OP_LINKAT:
6588 putname(req->hardlink.oldpath);
6589 putname(req->hardlink.newpath);
6590 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006591 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006592 }
Jens Axboe75652a302021-04-15 09:52:40 -06006593 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6594 kfree(req->apoll->double_poll);
6595 kfree(req->apoll);
6596 req->apoll = NULL;
6597 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006598 if (req->flags & REQ_F_INFLIGHT) {
6599 struct io_uring_task *tctx = req->task->io_uring;
6600
6601 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006602 }
Pavel Begunkovc8543572021-06-17 18:14:04 +01006603 if (req->flags & REQ_F_CREDS)
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006604 put_cred(req->creds);
Pavel Begunkovc8543572021-06-17 18:14:04 +01006605
6606 req->flags &= ~IO_REQ_CLEAN_FLAGS;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006607}
6608
Pavel Begunkov889fca72021-02-10 00:03:09 +00006609static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006610{
Jens Axboeedafcce2019-01-09 09:16:05 -07006611 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006612 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006613 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006614
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006615 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006616 creds = override_creds(req->creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006617
Jens Axboed625c6e2019-12-17 19:53:05 -07006618 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006619 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006620 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006621 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006622 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006623 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006624 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006625 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006626 break;
6627 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006628 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006629 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006630 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006631 break;
6632 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006633 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006634 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006635 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006636 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006637 break;
6638 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006639 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006640 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006641 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006642 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006643 break;
6644 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006645 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006646 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006647 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006648 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006649 break;
6650 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006651 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006652 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006653 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006654 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006655 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006656 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006657 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006658 break;
6659 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006660 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006661 break;
6662 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006663 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006664 break;
6665 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006666 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006667 break;
6668 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006669 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006670 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006671 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006672 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006673 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006674 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006675 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006676 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006677 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006678 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006679 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006680 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006681 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006682 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006683 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006684 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006685 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006686 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006687 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006688 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006689 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006690 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006691 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006692 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006693 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006694 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006695 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006696 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006697 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006698 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006699 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006700 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006701 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006702 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006703 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006704 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006705 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006706 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006707 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006708 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006709 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006710 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006711 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006712 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006713 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006714 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006715 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006716 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006717 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006718 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006719 case IORING_OP_MKDIRAT:
6720 ret = io_mkdirat(req, issue_flags);
6721 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006722 case IORING_OP_SYMLINKAT:
6723 ret = io_symlinkat(req, issue_flags);
6724 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006725 case IORING_OP_LINKAT:
6726 ret = io_linkat(req, issue_flags);
6727 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006728 default:
6729 ret = -EINVAL;
6730 break;
6731 }
Jens Axboe31b51512019-01-18 22:56:34 -07006732
Jens Axboe5730b272021-02-27 15:57:30 -07006733 if (creds)
6734 revert_creds(creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006735 if (ret)
6736 return ret;
Jens Axboeb5325762020-05-19 21:20:27 -06006737 /* If the op doesn't have a file, we're not polling for it */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01006738 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6739 io_iopoll_req_issued(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006740
6741 return 0;
6742}
6743
Pavel Begunkovebc11b62021-08-09 13:04:05 +01006744static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6745{
6746 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6747
6748 req = io_put_req_find_next(req);
6749 return req ? &req->work : NULL;
6750}
6751
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006752static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006753{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006754 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006755 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006756 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006757
Pavel Begunkov48dcd382021-08-15 10:40:18 +01006758 /* one will be dropped by ->io_free_work() after returning to io-wq */
6759 if (!(req->flags & REQ_F_REFCOUNT))
6760 __io_req_set_refcount(req, 2);
6761 else
6762 req_ref_get(req);
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006763
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006764 timeout = io_prep_linked_timeout(req);
6765 if (timeout)
6766 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006767
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006768 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
Jens Axboe4014d942021-01-19 15:53:54 -07006769 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006770 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006771
Jens Axboe561fb042019-10-24 07:25:42 -06006772 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006773 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006774 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006775 /*
6776 * We can get EAGAIN for polled IO even though we're
6777 * forcing a sync submission from here, since we can't
6778 * wait for request slots on the block side.
6779 */
Pavel Begunkov51ebf1b2022-05-13 11:24:56 +01006780 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe561fb042019-10-24 07:25:42 -06006781 break;
6782 cond_resched();
6783 } while (1);
6784 }
Jens Axboe31b51512019-01-18 22:56:34 -07006785
Pavel Begunkova3df76982021-02-18 22:32:52 +00006786 /* avoid locking problems by failing it from a clean context */
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006787 if (ret)
Pavel Begunkova3df76982021-02-18 22:32:52 +00006788 io_req_task_queue_fail(req, ret);
Jens Axboe31b51512019-01-18 22:56:34 -07006789}
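
/*
 * Two references are held across io-wq execution: one is dropped by
 * ->io_free_work() (io_wq_free_work() above) once the work item
 * returns, the other by normal request completion.
 */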
Jens Axboe2b188cc2019-01-07 10:46:33 -07006790
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006791static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006792 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006793{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006794 return &table->files[i];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006795}
6796
Jens Axboe09bb8392019-03-13 12:39:28 -06006797static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6798 int index)
6799{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006800 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006801
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006802 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006803}
6804
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006805static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006806{
6807 unsigned long file_ptr = (unsigned long) file;
6808
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006809 if (__io_file_supports_nowait(file, READ))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006810 file_ptr |= FFS_ASYNC_READ;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006811 if (__io_file_supports_nowait(file, WRITE))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006812 file_ptr |= FFS_ASYNC_WRITE;
6813 if (S_ISREG(file_inode(file)->i_mode))
6814 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006815 file_slot->file_ptr = file_ptr;
Jens Axboe09bb8392019-03-13 12:39:28 -06006816}
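
/*
 * The low FFS_* bits of file_ptr cache per-file properties (nowait
 * read/write support, regular file) so the submission fast path can
 * derive REQ_F flags without dereferencing struct file; the pointer
 * itself is recovered by masking with FFS_MASK, as in
 * io_file_from_index() above.
 */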
6817
Pavel Begunkovac177052021-08-09 13:04:02 +01006818static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
6819 struct io_kiocb *req, int fd)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006820{
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006821 struct file *file;
Pavel Begunkovac177052021-08-09 13:04:02 +01006822 unsigned long file_ptr;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006823
Pavel Begunkovac177052021-08-09 13:04:02 +01006824 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6825 return NULL;
6826 fd = array_index_nospec(fd, ctx->nr_user_files);
6827 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6828 file = (struct file *) (file_ptr & FFS_MASK);
6829 file_ptr &= ~FFS_MASK;
6830 /* mask in overlapping REQ_F and FFS bits */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006831 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
Pavel Begunkovac177052021-08-09 13:04:02 +01006832 io_req_set_rsrc_node(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006833 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006834}
6835
Pavel Begunkovac177052021-08-09 13:04:02 +01006836static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006837 struct io_kiocb *req, int fd)
6838{
Pavel Begunkov62906e82021-08-10 14:52:47 +01006839 struct file *file = fget(fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01006840
6841 trace_io_uring_file_get(ctx, fd);
6842
6843	/* io_uring fds can't be fixed files; track any we see as inflight */
6844 if (file && unlikely(file->f_op == &io_uring_fops))
6845 io_req_track_inflight(req);
6846 return file;
6847}
6848
6849static inline struct file *io_file_get(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006850 struct io_kiocb *req, int fd, bool fixed)
6851{
6852 if (fixed)
6853 return io_file_get_fixed(ctx, req, fd);
6854 else
Pavel Begunkov62906e82021-08-10 14:52:47 +01006855 return io_file_get_normal(ctx, req, fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01006856}
6857
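/*
 * task_work callback run when a link timeout fires: try to cancel the
 * request this timeout was linked to (unless the task is exiting), then
 * post the timeout's own completion (-ETIME, or the cancellation error).
 */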
Pavel Begunkovf237c302021-08-18 12:42:46 +01006858static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89b263f2021-08-10 15:14:18 -06006859{
6860 struct io_kiocb *prev = req->timeout.prev;
Pavel Begunkov3d2a1e62021-11-26 14:38:14 +00006861 int ret = -ENOENT;
Jens Axboe89b263f2021-08-10 15:14:18 -06006862
6863 if (prev) {
Pavel Begunkov3d2a1e62021-11-26 14:38:14 +00006864 if (!(req->task->flags & PF_EXITING))
6865 ret = io_try_cancel_userdata(req, prev->user_data);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006866 io_req_complete_post(req, ret ?: -ETIME, 0);
Jens Axboe89b263f2021-08-10 15:14:18 -06006867 io_put_req(prev);
Jens Axboe89b263f2021-08-10 15:14:18 -06006868 } else {
6869 io_req_complete_post(req, -ETIME, 0);
6870 }
6871}
6872
Jens Axboe2665abf2019-11-05 12:40:47 -07006873static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6874{
Jens Axboead8a48a2019-11-15 08:49:11 -07006875 struct io_timeout_data *data = container_of(timer,
6876 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006877 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006878 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006879 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006880
Jens Axboe89b263f2021-08-10 15:14:18 -06006881 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006882 prev = req->timeout.head;
6883 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006884
6885 /*
6886	 * We don't expect the list to be empty; that will only happen if we
6887 * race with the completion of the linked work.
6888 */
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006889 if (prev) {
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006890 io_remove_next_linked(prev);
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006891 if (!req_ref_inc_not_zero(prev))
6892 prev = NULL;
6893 }
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006894 list_del(&req->timeout.list);
Jens Axboe89b263f2021-08-10 15:14:18 -06006895 req->timeout.prev = prev;
6896 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Jens Axboe2665abf2019-11-05 12:40:47 -07006897
Jens Axboe89b263f2021-08-10 15:14:18 -06006898 req->io_task_work.func = io_req_task_link_timeout;
6899 io_req_task_work_add(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006900 return HRTIMER_NORESTART;
6901}
6902
Pavel Begunkovde968c12021-03-19 17:22:33 +00006903static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006904{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006905 struct io_ring_ctx *ctx = req->ctx;
6906
Jens Axboe89b263f2021-08-10 15:14:18 -06006907 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006908 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006909 * If the back reference is NULL, then our linked request finished
6910	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006911 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006912 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006913 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006914
Jens Axboead8a48a2019-11-15 08:49:11 -07006915 data->timer.function = io_link_timeout_fn;
6916 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6917 data->mode);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006918 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
Jens Axboe2665abf2019-11-05 12:40:47 -07006919 }
Jens Axboe89b263f2021-08-10 15:14:18 -06006920 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006921 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006922 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006923}
6924
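/*
 * Issue one request inline. On success the completion may be batched in
 * submit_state->compl_reqs and flushed once the array fills up; on
 * -EAGAIN without REQ_F_NOWAIT we try to arm poll, falling back to the
 * async worker; any other error fails the request.
 */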
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006925static void __io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006926 __must_hold(&req->ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006927{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006928 struct io_kiocb *linked_timeout;
Jens Axboee0c5c572019-03-12 10:18:47 -06006929 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006930
Olivier Langlois59b735a2021-06-22 05:17:39 -07006931issue_sqe:
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006932 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006933
6934 /*
6935 * We async punt it if the file wasn't marked NOWAIT, or if the file
6936 * doesn't support non-blocking read/write attempts
6937 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006938 if (likely(!ret)) {
Pavel Begunkove342c802021-01-19 13:32:47 +00006939 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006940 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006941 struct io_submit_state *state = &ctx->submit_state;
Jens Axboee65ef562019-03-12 10:16:44 -06006942
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01006943 state->compl_reqs[state->compl_nr++] = req;
6944 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01006945 io_submit_flush_completions(ctx);
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006946 return;
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006947 }
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006948
6949 linked_timeout = io_prep_linked_timeout(req);
6950 if (linked_timeout)
6951 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov18400382021-03-19 17:22:34 +00006952 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006953 linked_timeout = io_prep_linked_timeout(req);
6954
Olivier Langlois59b735a2021-06-22 05:17:39 -07006955 switch (io_arm_poll_handler(req)) {
6956 case IO_APOLL_READY:
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006957 if (linked_timeout)
Pavel Begunkov4ea672a2021-10-20 09:53:02 +01006958 io_queue_linked_timeout(linked_timeout);
Olivier Langlois59b735a2021-06-22 05:17:39 -07006959 goto issue_sqe;
6960 case IO_APOLL_ABORTED:
Pavel Begunkov18400382021-03-19 17:22:34 +00006961 /*
6962			 * Queued up for async execution; the worker will release
6963			 * the submit reference when the iocb is actually submitted.
6964 */
Pavel Begunkovf237c302021-08-18 12:42:46 +01006965 io_queue_async_work(req, NULL);
Olivier Langlois59b735a2021-06-22 05:17:39 -07006966 break;
Pavel Begunkov18400382021-03-19 17:22:34 +00006967 }
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01006968
6969 if (linked_timeout)
6970 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006971 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006972 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006973 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006974}
6975
Pavel Begunkov441b8a72021-06-14 23:37:31 +01006976static inline void io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006977 __must_hold(&req->ctx->uring_lock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006978{
Pavel Begunkov10c66902021-06-15 16:47:56 +01006979 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006980 return;
Jackie Liu4fe2c962019-09-09 20:50:40 +08006981
Hao Xua8295b92021-08-27 17:46:09 +08006982 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006983 __io_queue_sqe(req);
Hao Xua8295b92021-08-27 17:46:09 +08006984 } else if (req->flags & REQ_F_FAIL) {
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01006985 io_req_complete_fail_submit(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006986 } else {
6987 int ret = io_req_prep_async(req);
6988
6989 if (unlikely(ret))
6990 io_req_complete_failed(req, ret);
6991 else
Pavel Begunkovf237c302021-08-18 12:42:46 +01006992 io_queue_async_work(req, NULL);
Jens Axboece35a472019-12-17 08:04:44 -07006993 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006994}
6995
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006996/*
6997 * Check SQE restrictions (opcode and flags).
6998 *
6999 * Returns 'true' if SQE is allowed, 'false' otherwise.
7000 */
7001static inline bool io_check_restriction(struct io_ring_ctx *ctx,
7002 struct io_kiocb *req,
7003 unsigned int sqe_flags)
7004{
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01007005 if (likely(!ctx->restricted))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007006 return true;
7007
7008 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7009 return false;
7010
7011 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
7012 ctx->restrictions.sqe_flags_required)
7013 return false;
7014
7015 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
7016 ctx->restrictions.sqe_flags_required))
7017 return false;
7018
7019 return true;
7020}
7021
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007022static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007023 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007024 __must_hold(&ctx->uring_lock)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007025{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007026 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007027 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07007028 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007029
Pavel Begunkov864ea922021-08-09 13:04:08 +01007030 /* req is partially pre-initialised, see io_preinit_req() */
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007031 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007032 /* same numerical values with corresponding REQ_F_*, safe to copy */
7033 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007034 req->user_data = READ_ONCE(sqe->user_data);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007035 req->file = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007036 req->fixed_rsrc_refs = NULL;
Pavel Begunkov4dd28242020-06-15 10:33:13 +03007037 req->task = current;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007038
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007039 /* enforce forwards compatibility on users */
Pavel Begunkovdddca222021-04-27 16:13:52 +01007040 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007041 return -EINVAL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007042 if (unlikely(req->opcode >= IORING_OP_LAST))
7043 return -EINVAL;
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01007044 if (!io_check_restriction(ctx, req, sqe_flags))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007045 return -EACCES;
7046
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007047 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
7048 !io_op_defs[req->opcode].buffer_select)
7049 return -EOPNOTSUPP;
Pavel Begunkov3c199662021-06-15 16:47:57 +01007050 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
7051 ctx->drain_active = true;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007052
Jens Axboe003e8dc2021-03-06 09:22:27 -07007053 personality = READ_ONCE(sqe->personality);
7054 if (personality) {
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01007055 req->creds = xa_load(&ctx->personalities, personality);
7056 if (!req->creds)
Jens Axboe003e8dc2021-03-06 09:22:27 -07007057 return -EINVAL;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01007058 get_cred(req->creds);
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01007059 req->flags |= REQ_F_CREDS;
Jens Axboe003e8dc2021-03-06 09:22:27 -07007060 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007061 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007062
Jens Axboe27926b62020-10-28 09:33:23 -06007063 /*
7064 * Plug now if we have more than 1 IO left after this, and the target
7065	 * is potentially a read/write to block-based storage.
7066 */
7067 if (!state->plug_started && state->ios_left > 1 &&
7068 io_op_defs[req->opcode].plug) {
7069 blk_start_plug(&state->plug);
7070 state->plug_started = true;
7071 }
Jens Axboe63ff8222020-05-07 14:56:15 -06007072
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00007073 if (io_op_defs[req->opcode].needs_file) {
Pavel Begunkov62906e82021-08-10 14:52:47 +01007074 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
Pavel Begunkovac177052021-08-09 13:04:02 +01007075 (sqe_flags & IOSQE_FIXED_FILE));
Pavel Begunkovba13e232021-02-01 18:59:52 +00007076 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00007077 ret = -EBADF;
7078 }
7079
Pavel Begunkov71b547c2020-10-10 18:34:09 +01007080 state->ios_left--;
7081 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007082}
7083
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007084static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007085 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007086 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007087{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007088 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007089 int ret;
7090
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007091 ret = io_init_req(ctx, req, sqe);
7092 if (unlikely(ret)) {
7093fail_req:
Hao Xua8295b92021-08-27 17:46:09 +08007094 /* fail even hard links since we don't submit */
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007095 if (link->head) {
Hao Xua8295b92021-08-27 17:46:09 +08007096 /*
7097			 * A link request counts as failed or cancelled once
7098			 * REQ_F_FAIL is set, but the head is an exception: it
7099			 * may carry REQ_F_FAIL because some other request in
7100			 * the chain failed. Use req->result to tell a head
7101			 * that failed itself from one marked by another
7102			 * request's failure, so the correct ret code is set.
7103			 * Init result here to avoid affecting the normal path.
7104 */
7105 if (!(link->head->flags & REQ_F_FAIL))
7106 req_fail_link_node(link->head, -ECANCELED);
7107 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7108 /*
7109			 * The current req is a normal req; return the
7110			 * error and thus break the submission loop.
7111 */
7112 io_req_complete_failed(req, ret);
7113 return ret;
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007114 }
Hao Xua8295b92021-08-27 17:46:09 +08007115 req_fail_link_node(req, ret);
7116 } else {
7117 ret = io_req_prep(req, sqe);
7118 if (unlikely(ret))
7119 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007120 }
Pavel Begunkov441b8a72021-06-14 23:37:31 +01007121
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00007122 /* don't need @sqe from now on */
Olivier Langlois236daeae2021-05-31 02:36:37 -04007123 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
7124 req->flags, true,
7125 ctx->flags & IORING_SETUP_SQPOLL);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007126
Jens Axboe6c271ce2019-01-10 11:22:30 -07007127 /*
7128 * If we already have a head request, queue this one for async
7129 * submittal once the head completes. If we don't have a head but
7130 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7131 * submitted sync once the chain is complete. If none of those
7132 * conditions are true (normal request), then just queue it.
7133 */
7134 if (link->head) {
7135 struct io_kiocb *head = link->head;
7136
Hao Xua8295b92021-08-27 17:46:09 +08007137 if (!(req->flags & REQ_F_FAIL)) {
7138 ret = io_req_prep_async(req);
7139 if (unlikely(ret)) {
7140 req_fail_link_node(req, ret);
7141 if (!(head->flags & REQ_F_FAIL))
7142 req_fail_link_node(head, -ECANCELED);
7143 }
7144 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007145 trace_io_uring_link(ctx, req, head);
7146 link->last->link = req;
7147 link->last = req;
7148
7149 /* last request of a link, enqueue the link */
7150 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7151 link->head = NULL;
Pavel Begunkov5e159202021-06-14 23:37:26 +01007152 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007153 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08007154 } else {
Jens Axboe2b188cc2019-01-07 10:46:33 -07007155 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08007156 link->head = req;
7157 link->last = req;
7158 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00007159 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08007160 }
7161 }
7162
7163 return 0;
7164}
7165
7166/*
7167 * Batched submission is done; ensure local IO is flushed out.
7168 */
7169static void io_submit_state_end(struct io_submit_state *state,
7170 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03007171{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007172 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007173 io_queue_sqe(state->link.head);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01007174 if (state->compl_nr)
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01007175 io_submit_flush_completions(ctx);
Jackie Liua197f662019-11-08 08:09:12 -07007176 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007177 blk_finish_plug(&state->plug);
Jens Axboe9e645e112019-05-10 16:07:28 -06007178}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007179
Jens Axboe9e645e112019-05-10 16:07:28 -06007180/*
7181 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007182 */
Jens Axboe9e645e112019-05-10 16:07:28 -06007183static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03007184 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06007185{
7186 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07007187 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007188 /* set only head, no need to init link_last in advance */
7189 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07007190}
7191
Jens Axboe193155c2020-02-22 23:22:19 -07007192static void io_commit_sqring(struct io_ring_ctx *ctx)
7193{
Jens Axboe75c6a032020-01-28 10:15:23 -07007194 struct io_rings *rings = ctx->rings;
7195
7196 /*
Jens Axboe193155c2020-02-22 23:22:19 -07007197 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07007198 * since once we write the new head, the application could
7199 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03007200 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03007201 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07007202}
7203
Jens Axboe9e645e112019-05-10 16:07:28 -06007204/*
Fam Zhengdd9ae8a2021-06-04 17:42:56 +01007205 * Fetch an sqe, if one is available. Note this returns a pointer to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06007206 * that is mapped by userspace. This means that care needs to be taken to
7207 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07007208 * being a good citizen. If members of the sqe are validated and then later
7209 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03007210 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06007211 */
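/*
 * Note the indirection: sq_array[cached_sq_head & (sq_entries - 1)]
 * holds the SQE index chosen by the application. With 8 entries, for
 * example, a cached head of 10 reads slot sq_array[2].
 */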
7212static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06007213{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01007214 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01007215 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06007216
7217 /*
7218 * The cached sq head (or cq tail) serves two purposes:
7219 *
7220 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03007221 *    head.
Jens Axboe9e645e112019-05-10 16:07:28 -06007222 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03007223 * though the application is the one updating it.
7224 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01007225 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03007226 if (likely(head < ctx->sq_entries))
7227 return &ctx->sq_sqes[head];
7228
7229 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01007230 ctx->cq_extra--;
7231 WRITE_ONCE(ctx->rings->sq_dropped,
7232 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03007233 return NULL;
7234}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07007235
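/*
 * Submit up to @nr SQEs. One ctx ref and one task ref are taken up
 * front per request; refs left over when the loop stops early (request
 * allocation failure or an exhausted SQ ring) are returned below.
 */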
Jens Axboe0f212202020-09-13 13:09:39 -06007236static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007237 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007238{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00007239 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007240
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03007241 /* make sure SQ entry isn't read before tail */
7242 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03007243 if (!percpu_ref_tryget_many(&ctx->refs, nr))
7244 return -EAGAIN;
Pavel Begunkov9a108672021-08-27 11:55:01 +01007245 io_get_task_refs(nr);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007246
Pavel Begunkovba88ff12021-02-10 00:03:11 +00007247 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkov46c4e162021-02-18 18:29:37 +00007248 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07007249 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03007250 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03007251
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007252 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03007253 if (unlikely(!req)) {
7254 if (!submitted)
7255 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03007256 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06007257 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00007258 sqe = io_get_sqe(ctx);
7259 if (unlikely(!sqe)) {
Hao Xu0c6e1d72021-08-26 01:58:56 +08007260 list_add(&req->inflight_entry, &ctx->submit_state.free_list);
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00007261 break;
7262 }
Jens Axboed3656342019-12-18 09:50:26 -07007263 /* will complete beyond this point, count as submitted */
7264 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007265 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07007266 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007267 }
7268
Pavel Begunkov9466f432020-01-25 22:34:01 +03007269 if (unlikely(submitted != nr)) {
7270 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06007271 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03007272
Pavel Begunkov09899b12021-06-14 02:36:22 +01007273 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06007274 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03007275 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007276
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007277 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03007278 /* Commit SQ ring head once we've consumed and submitted all SQEs */
7279 io_commit_sqring(ctx);
7280
Jens Axboe6c271ce2019-01-10 11:22:30 -07007281 return submitted;
7282}
7283
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007284static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
7285{
7286 return READ_ONCE(sqd->state);
7287}
7288
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007289static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
7290{
7291 /* Tell userspace we may need a wakeup call */
Jens Axboe79ebeae2021-08-10 15:18:27 -06007292 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07007293 WRITE_ONCE(ctx->rings->sq_flags,
7294 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007295 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007296}
7297
7298static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
7299{
Jens Axboe79ebeae2021-08-10 15:18:27 -06007300 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07007301 WRITE_ONCE(ctx->rings->sq_flags,
7302 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007303 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007304}
7305
Xiaoguang Wang08369242020-11-03 14:15:59 +08007306static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007307{
Jens Axboec8d1ba52020-09-14 11:07:26 -06007308 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08007309 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007310
Jens Axboec8d1ba52020-09-14 11:07:26 -06007311 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06007312 /* if we're handling multiple rings, cap submit size for fairness */
Olivier Langlois4ce8ad92021-06-23 11:50:18 -07007313 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
7314 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
Jens Axboee95eee22020-09-08 09:11:32 -06007315
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007316 if (!list_empty(&ctx->iopoll_list) || to_submit) {
7317 unsigned nr_events = 0;
Pavel Begunkov948e1942021-06-24 15:09:55 +01007318 const struct cred *creds = NULL;
7319
7320 if (ctx->sq_creds != current_cred())
7321 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007322
Xiaoguang Wang08369242020-11-03 14:15:59 +08007323 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007324 if (!list_empty(&ctx->iopoll_list))
Pavel Begunkova8576af2021-08-15 10:40:21 +01007325 io_do_iopoll(ctx, &nr_events, 0);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007326
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01007327 /*
7328		 * Don't submit if refs are dying; that's good for io_uring_register(),
7329		 * but it is also relied upon by io_ring_exit_work().
7330 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00007331 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
7332 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08007333 ret = io_submit_sqes(ctx, to_submit);
7334 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06007335
Pavel Begunkovacfb3812021-05-16 22:58:03 +01007336 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
7337 wake_up(&ctx->sqo_sq_wait);
Pavel Begunkov948e1942021-06-24 15:09:55 +01007338 if (creds)
7339 revert_creds(creds);
Pavel Begunkovacfb3812021-05-16 22:58:03 +01007340 }
Jens Axboe90554202020-09-03 12:12:41 -06007341
Xiaoguang Wang08369242020-11-03 14:15:59 +08007342 return ret;
7343}
7344
7345static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7346{
7347 struct io_ring_ctx *ctx;
7348 unsigned sq_thread_idle = 0;
7349
Pavel Begunkovc9dca272021-03-10 13:13:55 +00007350 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7351 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08007352 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06007353}
7354
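/*
 * Handle park/stop requests and signals for the SQPOLL thread. Called
 * with sqd->lock held; the lock is dropped while handling the event so
 * that io_sq_thread_park() callers can make progress.
 */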
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007355static bool io_sqd_handle_event(struct io_sq_data *sqd)
7356{
7357 bool did_sig = false;
7358 struct ksignal ksig;
7359
7360 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
7361 signal_pending(current)) {
7362 mutex_unlock(&sqd->lock);
7363 if (signal_pending(current))
7364 did_sig = get_signal(&ksig);
7365 cond_resched();
7366 mutex_lock(&sqd->lock);
7367 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007368 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7369}
7370
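/*
 * Main loop of the SQPOLL kernel thread: submit on behalf of every ctx
 * on this sqd, busy-poll until sq_thread_idle expires without finding
 * work, then set IORING_SQ_NEED_WAKEUP and sleep until userspace (or a
 * new event) wakes the thread again.
 */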
Jens Axboe6c271ce2019-01-10 11:22:30 -07007371static int io_sq_thread(void *data)
7372{
Jens Axboe69fb2132020-09-14 11:16:23 -06007373 struct io_sq_data *sqd = data;
7374 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08007375 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007376 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08007377 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007378
Pavel Begunkov696ee882021-04-01 09:55:04 +01007379 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007380 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06007381
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007382 if (sqd->sq_cpu != -1)
7383 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
7384 else
7385 set_cpus_allowed_ptr(current, cpu_online_mask);
7386 current->flags |= PF_NO_SETAFFINITY;
7387
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007388 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007389 while (1) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01007390 bool cap_entries, sqt_spin = false;
Jens Axboec1edbf52019-11-10 16:56:04 -07007391
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007392 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
7393 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01007394 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08007395 timeout = jiffies + sqd->sq_thread_idle;
7396 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007397
Jens Axboee95eee22020-09-08 09:11:32 -06007398 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06007399 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkov948e1942021-06-24 15:09:55 +01007400 int ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007401
Xiaoguang Wang08369242020-11-03 14:15:59 +08007402 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7403 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007404 }
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01007405 if (io_run_task_work())
7406 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007407
Xiaoguang Wang08369242020-11-03 14:15:59 +08007408 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06007409 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08007410 if (sqt_spin)
7411 timeout = jiffies + sqd->sq_thread_idle;
7412 continue;
7413 }
7414
Xiaoguang Wang08369242020-11-03 14:15:59 +08007415 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01007416 if (!io_sqd_events_pending(sqd) && !current->task_works) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01007417 bool needs_sched = true;
7418
Hao Xu724cb4f2021-04-21 23:19:11 +08007419 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01007420 io_ring_set_wakeup_flag(ctx);
7421
Hao Xu724cb4f2021-04-21 23:19:11 +08007422 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7423 !list_empty_careful(&ctx->iopoll_list)) {
7424 needs_sched = false;
7425 break;
7426 }
7427 if (io_sqring_entries(ctx)) {
7428 needs_sched = false;
7429 break;
7430 }
7431 }
7432
7433 if (needs_sched) {
7434 mutex_unlock(&sqd->lock);
7435 schedule();
7436 mutex_lock(&sqd->lock);
7437 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007438 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7439 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007440 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08007441
7442 finish_wait(&sqd->wait, &wait);
7443 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007444 }
7445
Pavel Begunkov78cc6872021-06-14 02:36:23 +01007446 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007447 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07007448 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07007449 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007450 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01007451 mutex_unlock(&sqd->lock);
7452
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007453 complete(&sqd->exited);
7454 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007455}
7456
Jens Axboebda52162019-09-24 13:47:15 -06007457struct io_wait_queue {
7458 struct wait_queue_entry wq;
7459 struct io_ring_ctx *ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007460 unsigned cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007461 unsigned nr_timeouts;
7462};
7463
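/*
 * The check below is written as a signed difference so it still works
 * once the 32-bit tail wraps: dist >= 0 means the CQ tail has reached
 * iowq->cq_tail, i.e. the CQ head seen at wait time plus min_events.
 */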
Pavel Begunkov6c503152021-01-04 20:36:36 +00007464static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06007465{
7466 struct io_ring_ctx *ctx = iowq->ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007467 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007468
7469 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007470 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007471 * started waiting. For timeouts, we always want to return to userspace,
7472 * regardless of event count.
7473 */
Jens Axboe5fd46172021-08-06 14:04:31 -06007474 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
Jens Axboebda52162019-09-24 13:47:15 -06007475}
7476
7477static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7478 int wake_flags, void *key)
7479{
7480 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7481 wq);
7482
Pavel Begunkov6c503152021-01-04 20:36:36 +00007483 /*
7484	 * Cannot safely flush overflowed CQEs from here; just wake the
7485	 * task and let the next invocation do it.
7486 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007487 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
Pavel Begunkov6c503152021-01-04 20:36:36 +00007488 return autoremove_wake_function(curr, mode, wake_flags, key);
7489 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06007490}
7491
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007492static int io_run_task_work_sig(void)
7493{
7494 if (io_run_task_work())
7495 return 1;
7496 if (!signal_pending(current))
7497 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06007498 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06007499 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007500 return -EINTR;
7501}
7502
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007503/* when this returns >0, the caller should retry */
7504static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7505 struct io_wait_queue *iowq,
Jens Axboe7c834372022-02-21 05:49:30 -07007506 ktime_t timeout)
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007507{
7508 int ret;
7509
7510 /* make sure we run task_work before checking for signals */
7511 ret = io_run_task_work_sig();
7512 if (ret || io_should_wake(iowq))
7513 return ret;
7514 /* let the caller flush overflows, retry */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007515 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007516 return 1;
7517
Jens Axboe7c834372022-02-21 05:49:30 -07007518 if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
7519 return -ETIME;
7520 return 1;
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007521}
7522
Jens Axboe2b188cc2019-01-07 10:46:33 -07007523/*
7524 * Wait until events become available, if we don't already have some. The
7525 * application must reap them itself, as they reside on the shared cq ring.
7526 */
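/*
 * Userspace reaches this path through io_uring_enter() with
 * IORING_ENTER_GETEVENTS set; illustratively, roughly what liburing's
 * wait helpers boil down to:
 *
 *	io_uring_enter(ring_fd, 0, 1, IORING_ENTER_GETEVENTS, NULL, 0);
 */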
7527static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007528 const sigset_t __user *sig, size_t sigsz,
7529 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007530{
Pavel Begunkov902910992021-08-09 09:07:32 -06007531 struct io_wait_queue iowq;
Hristo Venev75b28af2019-08-26 17:23:46 +00007532 struct io_rings *rings = ctx->rings;
Jens Axboe7c834372022-02-21 05:49:30 -07007533 ktime_t timeout = KTIME_MAX;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007534 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007535
Jens Axboeb41e9852020-02-17 09:52:41 -07007536 do {
Pavel Begunkov90f67362021-08-09 20:18:12 +01007537 io_cqring_overflow_flush(ctx);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007538 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007539 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007540 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007541 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007542 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007543
Xiaoguang Wang44df58d2021-09-14 22:38:52 +08007544 if (uts) {
7545 struct timespec64 ts;
7546
7547 if (get_timespec64(&ts, uts))
7548 return -EFAULT;
Jens Axboe7c834372022-02-21 05:49:30 -07007549 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
Xiaoguang Wang44df58d2021-09-14 22:38:52 +08007550 }
7551
Jens Axboe2b188cc2019-01-07 10:46:33 -07007552 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007553#ifdef CONFIG_COMPAT
7554 if (in_compat_syscall())
7555 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007556 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007557 else
7558#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007559 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007560
Jens Axboe2b188cc2019-01-07 10:46:33 -07007561 if (ret)
7562 return ret;
7563 }
7564
Pavel Begunkov902910992021-08-09 09:07:32 -06007565 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7566 iowq.wq.private = current;
7567 INIT_LIST_HEAD(&iowq.wq.entry);
7568 iowq.ctx = ctx;
Jens Axboebda52162019-09-24 13:47:15 -06007569 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Jens Axboe5fd46172021-08-06 14:04:31 -06007570 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
Pavel Begunkov902910992021-08-09 09:07:32 -06007571
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007572 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007573 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007574 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov90f67362021-08-09 20:18:12 +01007575 if (!io_cqring_overflow_flush(ctx)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007576 ret = -EBUSY;
7577 break;
7578 }
Pavel Begunkov311997b2021-06-14 23:37:28 +01007579 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
Jens Axboebda52162019-09-24 13:47:15 -06007580 TASK_INTERRUPTIBLE);
Jens Axboe7c834372022-02-21 05:49:30 -07007581 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
Pavel Begunkov311997b2021-06-14 23:37:28 +01007582 finish_wait(&ctx->cq_wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007583 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007584 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007585
Jens Axboeb7db41c2020-07-04 08:55:50 -06007586 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007587
Hristo Venev75b28af2019-08-26 17:23:46 +00007588 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007589}
7590
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007591static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007592{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007593 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007594
7595 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007596 kfree(table[i]);
7597 kfree(table);
7598}
7599
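/*
 * Back a @size-byte table with an array of page-sized (or smaller)
 * chunks so that large registered-resource tables avoid high-order
 * allocations; io_get_tag_slot() indexes through the chunk array.
 */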
7600static void **io_alloc_page_table(size_t size)
7601{
7602 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7603 size_t init_size = size;
7604 void **table;
7605
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007606 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007607 if (!table)
7608 return NULL;
7609
7610 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov27f6b312021-06-15 13:20:13 +01007611 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007612
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007613 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007614 if (!table[i]) {
7615 io_free_page_table(table, init_size);
7616 return NULL;
7617 }
7618 size -= this_size;
7619 }
7620 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007621}
7622
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007623static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7624{
7625 percpu_ref_exit(&ref_node->refs);
7626 kfree(ref_node);
7627}
7628
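/*
 * percpu_ref release callback for an rsrc node. Nodes must be put in
 * the order they were switched out, so a node whose refs hit zero early
 * is only queued for rsrc_put work once all earlier nodes on
 * ctx->rsrc_ref_list are done as well.
 */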
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007629static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7630{
7631 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7632 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
7633 unsigned long flags;
7634 bool first_add = false;
Dylan Yudaken82cc3382022-01-21 04:38:56 -08007635 unsigned long delay = HZ;
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007636
7637 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
7638 node->done = true;
7639
Dylan Yudaken82cc3382022-01-21 04:38:56 -08007640 /* if we are mid-quiesce then do not delay */
7641 if (node->rsrc_data->quiesce)
7642 delay = 0;
7643
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007644 while (!list_empty(&ctx->rsrc_ref_list)) {
7645 node = list_first_entry(&ctx->rsrc_ref_list,
7646 struct io_rsrc_node, node);
7647 /* recycle ref nodes in order */
7648 if (!node->done)
7649 break;
7650 list_del(&node->node);
7651 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
7652 }
7653 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
7654
7655 if (first_add)
Dylan Yudaken82cc3382022-01-21 04:38:56 -08007656 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007657}
7658
7659static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
7660{
7661 struct io_rsrc_node *ref_node;
7662
7663 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7664 if (!ref_node)
7665 return NULL;
7666
7667 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7668 0, GFP_KERNEL)) {
7669 kfree(ref_node);
7670 return NULL;
7671 }
7672 INIT_LIST_HEAD(&ref_node->node);
7673 INIT_LIST_HEAD(&ref_node->rsrc_list);
7674 ref_node->done = false;
7675 return ref_node;
7676}
7677
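/*
 * Retire the current rsrc node (when @data_to_kill is given) and
 * install the pre-allocated backup node: in-flight requests keep their
 * references to the old node while new requests pick up the fresh one.
 */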
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007678static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7679 struct io_rsrc_data *data_to_kill)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007680{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007681 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7682 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007683
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007684 if (data_to_kill) {
7685 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007686
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007687 rsrc_node->rsrc_data = data_to_kill;
Jens Axboe4956b9e2021-08-09 07:49:41 -06007688 spin_lock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007689 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
Jens Axboe4956b9e2021-08-09 07:49:41 -06007690 spin_unlock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007691
Pavel Begunkov3e942492021-04-11 01:46:34 +01007692 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007693 percpu_ref_kill(&rsrc_node->refs);
7694 ctx->rsrc_node = NULL;
7695 }
7696
7697 if (!ctx->rsrc_node) {
7698 ctx->rsrc_node = ctx->rsrc_backup_node;
7699 ctx->rsrc_backup_node = NULL;
7700 }
Pavel Begunkov1642b442020-12-30 21:34:14 +00007701}
7702
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007703static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007704{
7705 if (ctx->rsrc_backup_node)
7706 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007707 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007708 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7709}
7710
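/*
 * Wait for all references to @data to be dropped. ->uring_lock may be
 * released and re-acquired while waiting, with task_work run in
 * between, so completions still holding rsrc references can progress.
 */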
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007711static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007712{
7713 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007714
Pavel Begunkov215c3902021-04-01 15:43:48 +01007715	/* As we may drop ->uring_lock, another task may have started quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007716 if (data->quiesce)
7717 return -ENXIO;
7718
7719 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007720 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007721 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007722 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007723 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007724 io_rsrc_node_switch(ctx, data);
7725
Pavel Begunkov3e942492021-04-11 01:46:34 +01007726 /* kill initial ref, already quiesced if zero */
7727 if (atomic_dec_and_test(&data->refs))
7728 break;
Jens Axboec018db42021-08-09 08:15:50 -06007729 mutex_unlock(&ctx->uring_lock);
Hao Xu8bad28d2021-02-19 17:19:36 +08007730 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007731 ret = wait_for_completion_interruptible(&data->done);
Jens Axboec018db42021-08-09 08:15:50 -06007732 if (!ret) {
7733 mutex_lock(&ctx->uring_lock);
Dylan Yudaken0d773aa2022-02-22 08:17:51 -08007734 if (atomic_read(&data->refs) > 0) {
7735 /*
7736 * it has been revived by another thread while
7737 * we were unlocked
7738 */
7739 mutex_unlock(&ctx->uring_lock);
7740 } else {
7741 break;
7742 }
Jens Axboec018db42021-08-09 08:15:50 -06007743 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007744
Pavel Begunkov3e942492021-04-11 01:46:34 +01007745 atomic_inc(&data->refs);
7746	/* wait for all work items potentially completing data->done */
7747 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007748 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007749
Hao Xu8bad28d2021-02-19 17:19:36 +08007750 ret = io_run_task_work_sig();
7751 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007752 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007753 data->quiesce = false;
7754
Hao Xu8bad28d2021-02-19 17:19:36 +08007755 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007756}
7757
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007758static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7759{
7760 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7761 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7762
7763 return &data->tags[table_idx][off];
7764}
7765
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007766static void io_rsrc_data_free(struct io_rsrc_data *data)
7767{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007768 size_t size = data->nr * sizeof(data->tags[0][0]);
7769
7770 if (data->tags)
7771 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007772 kfree(data);
7773}
7774
Pavel Begunkovd878c812021-06-14 02:36:18 +01007775static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7776 u64 __user *utags, unsigned nr,
7777 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007778{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007779 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007780 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007781 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007782
7783 data = kzalloc(sizeof(*data), GFP_KERNEL);
7784 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007785 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007786 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007787 if (!data->tags) {
7788 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007789 return -ENOMEM;
7790 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007791
7792 data->nr = nr;
7793 data->ctx = ctx;
7794 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007795 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007796 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007797 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01007798 u64 *tag_slot = io_get_tag_slot(data, i);
7799
7800 if (copy_from_user(tag_slot, &utags[i],
7801 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007802 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007803 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007804 }
7805
Pavel Begunkov3e942492021-04-11 01:46:34 +01007806 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007807 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007808 *pdata = data;
7809 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007810fail:
7811 io_rsrc_data_free(data);
7812 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007813}
7814
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007815static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7816{
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007817 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
7818 GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007819 return !!table->files;
7820}
7821
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007822static void io_free_file_tables(struct io_file_table *table)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007823{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007824 kvfree(table->files);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007825 table->files = NULL;
7826}
7827
Jens Axboe2b188cc2019-01-07 10:46:33 -07007828static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7829{
7830#if defined(CONFIG_UNIX)
7831 if (ctx->ring_sock) {
7832 struct sock *sock = ctx->ring_sock->sk;
7833 struct sk_buff *skb;
7834
7835 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7836 kfree_skb(skb);
7837 }
7838#else
7839 int i;
7840
7841 for (i = 0; i < ctx->nr_user_files; i++) {
7842 struct file *file;
7843
7844 file = io_file_from_index(ctx, i);
7845 if (file)
7846 fput(file);
7847 }
7848#endif
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007849 io_free_file_tables(&ctx->file_table);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007850 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007851 ctx->file_data = NULL;
7852 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007853}
7854
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007855static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7856{
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01007857 unsigned nr = ctx->nr_user_files;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007858 int ret;
7859
Pavel Begunkov08480402021-04-13 02:58:38 +01007860 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007861 return -ENXIO;
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01007862
7863 /*
7864	 * Quiesce may unlock ->uring_lock; while it's not held,
7865	 * prevent new requests from using the table.
7866 */
7867 ctx->nr_user_files = 0;
Pavel Begunkov08480402021-04-13 02:58:38 +01007868 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01007869 ctx->nr_user_files = nr;
Pavel Begunkov08480402021-04-13 02:58:38 +01007870 if (!ret)
7871 __io_sqe_files_unregister(ctx);
7872 return ret;
Jens Axboe6b063142019-01-10 22:13:58 -07007873}
7874
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007875static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007876 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007877{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007878 WARN_ON_ONCE(sqd->thread == current);
7879
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007880 /*
7881	 * Do the full dance rather than a conditional clear_bit(): the latter
7882	 * would race with threads incrementing park_pending and setting the bit.
7883 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007884 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007885 if (atomic_dec_return(&sqd->park_pending))
7886 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007887 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007888}
7889
Jens Axboe86e0d672021-03-05 08:44:39 -07007890static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007891 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007892{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007893 WARN_ON_ONCE(sqd->thread == current);
7894
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007895 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007896 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007897 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007898 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007899 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007900}
7901
7902static void io_sq_thread_stop(struct io_sq_data *sqd)
7903{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007904 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007905 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007906
Jens Axboe05962f92021-03-06 13:58:48 -07007907 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007908 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07007909 if (sqd->thread)
7910 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007911 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007912 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007913}
7914
Jens Axboe534ca6d2020-09-02 13:52:19 -06007915static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007916{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007917 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007918 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7919
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007920 io_sq_thread_stop(sqd);
7921 kfree(sqd);
7922 }
7923}
7924
7925static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7926{
7927 struct io_sq_data *sqd = ctx->sq_data;
7928
7929 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007930 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007931 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007932 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007933 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007934
7935 io_put_sq_data(sqd);
7936 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007937 }
7938}
7939
Jens Axboeaa061652020-09-02 14:50:27 -06007940static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7941{
7942 struct io_ring_ctx *ctx_attach;
7943 struct io_sq_data *sqd;
7944 struct fd f;
7945
7946 f = fdget(p->wq_fd);
7947 if (!f.file)
7948 return ERR_PTR(-ENXIO);
7949 if (f.file->f_op != &io_uring_fops) {
7950 fdput(f);
7951 return ERR_PTR(-EINVAL);
7952 }
7953
7954 ctx_attach = f.file->private_data;
7955 sqd = ctx_attach->sq_data;
7956 if (!sqd) {
7957 fdput(f);
7958 return ERR_PTR(-EINVAL);
7959 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007960 if (sqd->task_tgid != current->tgid) {
7961 fdput(f);
7962 return ERR_PTR(-EPERM);
7963 }
Jens Axboeaa061652020-09-02 14:50:27 -06007964
7965 refcount_inc(&sqd->refs);
7966 fdput(f);
7967 return sqd;
7968}
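/*
 * Note: IORING_SETUP_ATTACH_WQ only attaches to a ring created by the same
 * thread group; a foreign task_tgid yields -EPERM above, which the caller
 * (io_get_sq_data()) deliberately turns into "allocate a fresh sqd" rather
 * than failing the setup.
 */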
7969
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007970static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7971 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007972{
7973 struct io_sq_data *sqd;
7974
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007975 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007976 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7977 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007978 if (!IS_ERR(sqd)) {
7979 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007980 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007981 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007982		/* fall through for the EPERM case, set up a new sqd/task */
7983 if (PTR_ERR(sqd) != -EPERM)
7984 return sqd;
7985 }
Jens Axboeaa061652020-09-02 14:50:27 -06007986
Jens Axboe534ca6d2020-09-02 13:52:19 -06007987 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7988 if (!sqd)
7989 return ERR_PTR(-ENOMEM);
7990
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007991 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007992 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007993 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007994 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007995 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007996 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007997 return sqd;
7998}
7999
Jens Axboe6b063142019-01-10 22:13:58 -07008000#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07008001/*
8002 * Ensure the UNIX gc is aware of our file set, so we are certain that
8003 * the io_uring can be safely unregistered on process exit, even if we
8004 * have reference cycles among the files.
8005 */
8006static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
8007{
8008 struct sock *sk = ctx->ring_sock->sk;
8009 struct scm_fp_list *fpl;
8010 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06008011 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07008012
Jens Axboe6b063142019-01-10 22:13:58 -07008013 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
8014 if (!fpl)
8015 return -ENOMEM;
8016
8017 skb = alloc_skb(0, GFP_KERNEL);
8018 if (!skb) {
8019 kfree(fpl);
8020 return -ENOMEM;
8021 }
8022
8023 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07008024
Jens Axboe08a45172019-10-03 08:11:03 -06008025 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07008026 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07008027 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06008028 struct file *file = io_file_from_index(ctx, i + offset);
8029
8030 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06008031 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06008032 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06008033 unix_inflight(fpl->user, fpl->fp[nr_files]);
8034 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07008035 }
8036
Jens Axboe08a45172019-10-03 08:11:03 -06008037 if (nr_files) {
8038 fpl->max = SCM_MAX_FD;
8039 fpl->count = nr_files;
8040 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008041 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06008042 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
8043 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07008044
Pavel Begunkov285f5d72022-04-06 12:43:58 +01008045 for (i = 0; i < nr; i++) {
8046 struct file *file = io_file_from_index(ctx, i + offset);
8047
8048 if (file)
8049 fput(file);
8050 }
Jens Axboe08a45172019-10-03 08:11:03 -06008051 } else {
8052 kfree_skb(skb);
Pavel Begunkov0853bd62022-03-25 16:36:31 +00008053 free_uid(fpl->user);
Jens Axboe08a45172019-10-03 08:11:03 -06008054 kfree(fpl);
8055 }
Jens Axboe6b063142019-01-10 22:13:58 -07008056
8057 return 0;
8058}
8059
8060/*
8061 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
8062 * causes regular reference counting to break down. We rely on the UNIX
8063 * garbage collector to take care of this problem for us.
8064 */
8065static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8066{
8067 unsigned left, total;
8068 int ret = 0;
8069
8070 total = 0;
8071 left = ctx->nr_user_files;
8072 while (left) {
8073 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07008074
8075 ret = __io_sqe_files_scm(ctx, this_files, total);
8076 if (ret)
8077 break;
8078 left -= this_files;
8079 total += this_files;
8080 }
8081
8082 if (!ret)
8083 return 0;
8084
8085 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06008086 struct file *file = io_file_from_index(ctx, total);
8087
8088 if (file)
8089 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07008090 total++;
8091 }
8092
8093 return ret;
8094}
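/*
 * Worked example, assuming SCM_MAX_FD == 253 from <net/scm.h>: registering
 * 600 files queues three SCM_RIGHTS skbs holding 253, 253 and 94 fds. If a
 * chunk fails, the loop above fput()s only the files that were never handed
 * to an skb.
 */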
8095#else
8096static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8097{
8098 return 0;
8099}
8100#endif
8101
Pavel Begunkov47e90392021-04-01 15:43:56 +01008102static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06008103{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00008104 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06008105#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06008106 struct sock *sock = ctx->ring_sock->sk;
8107 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8108 struct sk_buff *skb;
8109 int i;
8110
8111 __skb_queue_head_init(&list);
8112
8113 /*
8114 * Find the skb that holds this file in its SCM_RIGHTS. When found,
8115 * remove this entry and rearrange the file array.
8116 */
8117 skb = skb_dequeue(head);
8118 while (skb) {
8119 struct scm_fp_list *fp;
8120
8121 fp = UNIXCB(skb).fp;
8122 for (i = 0; i < fp->count; i++) {
8123 int left;
8124
8125 if (fp->fp[i] != file)
8126 continue;
8127
8128 unix_notinflight(fp->user, fp->fp[i]);
8129 left = fp->count - 1 - i;
8130 if (left) {
8131 memmove(&fp->fp[i], &fp->fp[i + 1],
8132 left * sizeof(struct file *));
8133 }
8134 fp->count--;
8135 if (!fp->count) {
8136 kfree_skb(skb);
8137 skb = NULL;
8138 } else {
8139 __skb_queue_tail(&list, skb);
8140 }
8141 fput(file);
8142 file = NULL;
8143 break;
8144 }
8145
8146 if (!file)
8147 break;
8148
8149 __skb_queue_tail(&list, skb);
8150
8151 skb = skb_dequeue(head);
8152 }
8153
8154 if (skb_peek(&list)) {
8155 spin_lock_irq(&head->lock);
8156 while ((skb = __skb_dequeue(&list)) != NULL)
8157 __skb_queue_tail(head, skb);
8158 spin_unlock_irq(&head->lock);
8159 }
8160#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07008161 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008162#endif
8163}
8164
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008165static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07008166{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008167 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008168 struct io_ring_ctx *ctx = rsrc_data->ctx;
8169 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008170
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008171 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
8172 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008173
8174 if (prsrc->tag) {
8175 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008176
8177 io_ring_submit_lock(ctx, lock_ring);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008178 spin_lock(&ctx->completion_lock);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01008179 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008180 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008181 spin_unlock(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008182 io_cqring_ev_posted(ctx);
8183 io_ring_submit_unlock(ctx, lock_ring);
8184 }
8185
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01008186 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008187 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008188 }
8189
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01008190 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01008191 if (atomic_dec_and_test(&rsrc_data->refs))
8192 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008193}
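/*
 * Note on tags: a resource registered with a non-zero tag posts a CQE from
 * __io_rsrc_put_work() on its final put, with cqe->user_data == tag and
 * cqe->res == 0, letting userspace know the old file/buffer is no longer
 * in use by the kernel.
 */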
8194
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008195static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06008196{
8197 struct io_ring_ctx *ctx;
8198 struct llist_node *node;
8199
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008200 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
8201 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06008202
8203 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008204 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06008205 struct llist_node *next = node->next;
8206
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008207 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008208 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06008209 node = next;
8210 }
8211}
8212
Jens Axboe05f3fb32019-12-09 11:22:50 -07008213static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01008214 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07008215{
8216 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008217 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008218 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01008219 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008220
8221 if (ctx->file_data)
8222 return -EBUSY;
8223 if (!nr_args)
8224 return -EINVAL;
8225 if (nr_args > IORING_MAX_FIXED_FILES)
8226 return -EMFILE;
Pavel Begunkov3a1b8a42021-08-20 10:36:35 +01008227 if (nr_args > rlimit(RLIMIT_NOFILE))
8228 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008229 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008230 if (ret)
8231 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008232 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
8233 &ctx->file_data);
8234 if (ret)
8235 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008236
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008237 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008238 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008239 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008240
Jens Axboe05f3fb32019-12-09 11:22:50 -07008241 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01008242 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008243 ret = -EFAULT;
8244 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008245 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008246 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01008247 if (fd == -1) {
8248 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008249 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01008250 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008251 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01008252 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008253
Jens Axboe05f3fb32019-12-09 11:22:50 -07008254 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008255 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01008256 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008257 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008258
8259 /*
8260 * Don't allow io_uring instances to be registered. If UNIX
8261 * isn't enabled, then this causes a reference cycle and this
8262 * instance can never get freed. If UNIX is enabled we'll
8263 * handle it just fine, but there's still no point in allowing
8264 * a ring fd as it doesn't support regular read/write anyway.
8265 */
8266 if (file->f_op == &io_uring_fops) {
8267 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008268 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008269 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008270 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008271 }
8272
Jens Axboe05f3fb32019-12-09 11:22:50 -07008273 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008274 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01008275 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008276 return ret;
8277 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008278
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008279 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008280 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008281out_fput:
8282 for (i = 0; i < ctx->nr_user_files; i++) {
8283 file = io_file_from_index(ctx, i);
8284 if (file)
8285 fput(file);
8286 }
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008287 io_free_file_tables(&ctx->file_table);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008288 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008289out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008290 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06008291 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008292 return ret;
8293}
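/*
 * Userspace sketch (raw syscall, error handling omitted): register a
 * two-slot table where -1 marks a sparse slot to be filled later:
 *
 *	int fds[2] = { open("data", O_RDONLY), -1 };
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES, fds, 2);
 *
 * IORING_REGISTER_FILES reaches this function with tags == NULL; the
 * IORING_REGISTER_FILES2 variant supplies per-slot tags as well.
 */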
8294
Jens Axboec3a31e62019-10-03 13:59:56 -06008295static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
8296 int index)
8297{
8298#if defined(CONFIG_UNIX)
8299 struct sock *sock = ctx->ring_sock->sk;
8300 struct sk_buff_head *head = &sock->sk_receive_queue;
8301 struct sk_buff *skb;
8302
8303 /*
8304 * See if we can merge this file into an existing skb SCM_RIGHTS
8305 * file set. If there's no room, fall back to allocating a new skb
8306 * and filling it in.
8307 */
8308 spin_lock_irq(&head->lock);
8309 skb = skb_peek(head);
8310 if (skb) {
8311 struct scm_fp_list *fpl = UNIXCB(skb).fp;
8312
8313 if (fpl->count < SCM_MAX_FD) {
8314 __skb_unlink(skb, head);
8315 spin_unlock_irq(&head->lock);
8316 fpl->fp[fpl->count] = get_file(file);
8317 unix_inflight(fpl->user, fpl->fp[fpl->count]);
8318 fpl->count++;
8319 spin_lock_irq(&head->lock);
8320 __skb_queue_head(head, skb);
8321 } else {
8322 skb = NULL;
8323 }
8324 }
8325 spin_unlock_irq(&head->lock);
8326
8327 if (skb) {
8328 fput(file);
8329 return 0;
8330 }
8331
8332 return __io_sqe_files_scm(ctx, 1, index);
8333#else
8334 return 0;
8335#endif
8336}
8337
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008338static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
8339 struct io_rsrc_node *node, void *rsrc)
8340{
Pavel Begunkov5218d5c2022-04-07 14:05:04 +01008341 u64 *tag_slot = io_get_tag_slot(data, idx);
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008342 struct io_rsrc_put *prsrc;
8343
8344 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
8345 if (!prsrc)
8346 return -ENOMEM;
8347
Pavel Begunkov5218d5c2022-04-07 14:05:04 +01008348 prsrc->tag = *tag_slot;
8349 *tag_slot = 0;
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008350 prsrc->rsrc = rsrc;
8351 list_add(&prsrc->list, &node->rsrc_list);
8352 return 0;
8353}
8354
Pavel Begunkovb9445592021-08-25 12:25:45 +01008355static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8356 unsigned int issue_flags, u32 slot_index)
8357{
8358 struct io_ring_ctx *ctx = req->ctx;
8359 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008360 bool needs_switch = false;
Pavel Begunkovb9445592021-08-25 12:25:45 +01008361 struct io_fixed_file *file_slot;
8362 int ret = -EBADF;
8363
8364 io_ring_submit_lock(ctx, !force_nonblock);
8365 if (file->f_op == &io_uring_fops)
8366 goto err;
8367 ret = -ENXIO;
8368 if (!ctx->file_data)
8369 goto err;
8370 ret = -EINVAL;
8371 if (slot_index >= ctx->nr_user_files)
8372 goto err;
8373
8374 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
8375 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008376
8377 if (file_slot->file_ptr) {
8378 struct file *old_file;
8379
8380 ret = io_rsrc_node_switch_start(ctx);
8381 if (ret)
8382 goto err;
8383
8384 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8385 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
8386 ctx->rsrc_node, old_file);
8387 if (ret)
8388 goto err;
8389 file_slot->file_ptr = 0;
8390 needs_switch = true;
8391 }
Pavel Begunkovb9445592021-08-25 12:25:45 +01008392
8393 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
8394 io_fixed_file_set(file_slot, file);
8395 ret = io_sqe_file_register(ctx, file, slot_index);
8396 if (ret) {
8397 file_slot->file_ptr = 0;
8398 goto err;
8399 }
8400
8401 ret = 0;
8402err:
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008403 if (needs_switch)
8404 io_rsrc_node_switch(ctx, ctx->file_data);
Pavel Begunkovb9445592021-08-25 12:25:45 +01008405 io_ring_submit_unlock(ctx, !force_nonblock);
8406 if (ret)
8407 fput(file);
8408 return ret;
8409}
8410
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008411static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
8412{
8413 unsigned int offset = req->close.file_slot - 1;
8414 struct io_ring_ctx *ctx = req->ctx;
8415 struct io_fixed_file *file_slot;
8416 struct file *file;
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008417 int ret;
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008418
8419 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8420 ret = -ENXIO;
8421 if (unlikely(!ctx->file_data))
8422 goto out;
8423 ret = -EINVAL;
8424 if (offset >= ctx->nr_user_files)
8425 goto out;
8426 ret = io_rsrc_node_switch_start(ctx);
8427 if (ret)
8428 goto out;
8429
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008430 offset = array_index_nospec(offset, ctx->nr_user_files);
8431 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008432 ret = -EBADF;
8433 if (!file_slot->file_ptr)
8434 goto out;
8435
8436 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8437 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
8438 if (ret)
8439 goto out;
8440
8441 file_slot->file_ptr = 0;
8442 io_rsrc_node_switch(ctx, ctx->file_data);
8443 ret = 0;
8444out:
8445 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8446 return ret;
8447}
8448
Jens Axboe05f3fb32019-12-09 11:22:50 -07008449static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008450 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07008451 unsigned nr_args)
8452{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008453 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008454 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008455 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008456 struct io_fixed_file *file_slot;
8457 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008458 int fd, i, err = 0;
8459 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008460 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06008461
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008462 if (!ctx->file_data)
8463 return -ENXIO;
8464 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06008465 return -EINVAL;
8466
Pavel Begunkov67973b92021-01-26 13:51:09 +00008467 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008468 u64 tag = 0;
8469
8470 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
8471 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06008472 err = -EFAULT;
8473 break;
8474 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008475 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
8476 err = -EINVAL;
8477 break;
8478 }
noah4e0377a2021-01-26 15:23:28 -05008479 if (fd == IORING_REGISTER_FILES_SKIP)
8480 continue;
8481
Pavel Begunkov67973b92021-01-26 13:51:09 +00008482 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008483 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00008484
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008485 if (file_slot->file_ptr) {
8486 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008487 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08008488 if (err)
8489 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008490 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008491 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06008492 }
8493 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06008494 file = fget(fd);
8495 if (!file) {
8496 err = -EBADF;
8497 break;
8498 }
8499 /*
8500 * Don't allow io_uring instances to be registered. If
8501 * UNIX isn't enabled, then this causes a reference
8502 * cycle and this instance can never get freed. If UNIX
8503 * is enabled we'll handle it just fine, but there's
8504 * still no point in allowing a ring fd as it doesn't
8505 * support regular read/write anyway.
8506 */
8507 if (file->f_op == &io_uring_fops) {
8508 fput(file);
8509 err = -EBADF;
8510 break;
8511 }
Pavel Begunkov50c981b2022-04-06 12:43:57 +01008512 *io_get_tag_slot(data, i) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01008513 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008514 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008515 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008516 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008517 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008518 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008519 }
Jens Axboec3a31e62019-10-03 13:59:56 -06008520 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008521 }
8522
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008523 if (needs_switch)
8524 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06008525 return done ? done : err;
8526}
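/*
 * Update-path sketch (raw syscall; assumes a table is already registered):
 * swap slot 3 for new_fd while leaving the rest of the table alone:
 *
 *	__s32 new_fd = open("other", O_RDONLY);
 *	struct io_uring_files_update up = {
 *		.offset	= 3,
 *		.fds	= (__u64)(unsigned long)&new_fd,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, 1);
 *
 * Passing -1 clears a slot; IORING_REGISTER_FILES_SKIP leaves it untouched.
 */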
Xiaoguang Wang05589552020-03-31 14:05:18 +08008527
Jens Axboe685fe7f2021-03-08 09:37:51 -07008528static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
8529 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03008530{
Jens Axboee9418942021-02-19 12:33:30 -07008531 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008532 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008533 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008534
Yang Yingliang362a9e62021-07-20 16:38:05 +08008535 mutex_lock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008536 hash = ctx->hash_map;
8537 if (!hash) {
8538 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
Yang Yingliang362a9e62021-07-20 16:38:05 +08008539 if (!hash) {
8540 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008541 return ERR_PTR(-ENOMEM);
Yang Yingliang362a9e62021-07-20 16:38:05 +08008542 }
Jens Axboee9418942021-02-19 12:33:30 -07008543 refcount_set(&hash->refs, 1);
8544 init_waitqueue_head(&hash->wait);
8545 ctx->hash_map = hash;
8546 }
Yang Yingliang362a9e62021-07-20 16:38:05 +08008547 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008548
8549 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07008550 data.task = task;
Pavel Begunkovebc11b62021-08-09 13:04:05 +01008551 data.free_work = io_wq_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03008552 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008553
Jens Axboed25e3a32021-02-16 11:41:41 -07008554	/* Do QD, or 4 * CPUS, whichever is smaller */
8555 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03008556
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008557 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03008558}
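/*
 * Example of the bound above: a 128-entry SQ on a 16-CPU box allows
 * min(128, 64) == 64 concurrent io-wq workers, while a QD-8 ring is
 * capped at 8.
 */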
8559
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008560static int io_uring_alloc_task_context(struct task_struct *task,
8561 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008562{
8563 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06008564 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06008565
Pavel Begunkov09899b12021-06-14 02:36:22 +01008566 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06008567 if (unlikely(!tctx))
8568 return -ENOMEM;
8569
Jens Axboed8a6df12020-10-15 16:24:45 -06008570 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8571 if (unlikely(ret)) {
8572 kfree(tctx);
8573 return ret;
8574 }
8575
Jens Axboe685fe7f2021-03-08 09:37:51 -07008576 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008577 if (IS_ERR(tctx->io_wq)) {
8578 ret = PTR_ERR(tctx->io_wq);
8579 percpu_counter_destroy(&tctx->inflight);
8580 kfree(tctx);
8581 return ret;
8582 }
8583
Jens Axboe0f212202020-09-13 13:09:39 -06008584 xa_init(&tctx->xa);
8585 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008586 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01008587 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06008588 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00008589 spin_lock_init(&tctx->task_lock);
8590 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00008591 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06008592 return 0;
8593}
8594
8595void __io_uring_free(struct task_struct *tsk)
8596{
8597 struct io_uring_task *tctx = tsk->io_uring;
8598
8599 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008600 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01008601 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008602
Jens Axboed8a6df12020-10-15 16:24:45 -06008603 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008604 kfree(tctx);
8605 tsk->io_uring = NULL;
8606}
8607
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008608static int io_sq_offload_create(struct io_ring_ctx *ctx,
8609 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008610{
8611 int ret;
8612
Jens Axboed25e3a32021-02-16 11:41:41 -07008613 /* Retain compatibility with failing for an invalid attach attempt */
8614 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8615 IORING_SETUP_ATTACH_WQ) {
8616 struct fd f;
8617
8618 f = fdget(p->wq_fd);
8619 if (!f.file)
8620 return -ENXIO;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008621 if (f.file->f_op != &io_uring_fops) {
8622 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008623 return -EINVAL;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008624 }
8625 fdput(f);
Jens Axboed25e3a32021-02-16 11:41:41 -07008626 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07008627 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07008628 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008629 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008630 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008631
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008632 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008633 if (IS_ERR(sqd)) {
8634 ret = PTR_ERR(sqd);
8635 goto err;
8636 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008637
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01008638 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06008639 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06008640 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8641 if (!ctx->sq_thread_idle)
8642 ctx->sq_thread_idle = HZ;
8643
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008644 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008645 list_add(&ctx->sqd_list, &sqd->ctx_list);
8646 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008647 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008648 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008649 io_sq_thread_unpark(sqd);
8650
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008651 if (ret < 0)
8652 goto err;
8653 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008654 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008655
Jens Axboe6c271ce2019-01-10 11:22:30 -07008656 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008657 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008658
Jens Axboe917257d2019-04-13 09:28:55 -06008659 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008660 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008661 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008662 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008663 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008664 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008665 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008666
8667 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008668 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008669 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8670 if (IS_ERR(tsk)) {
8671 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008672 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008673 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008674
Jens Axboe46fe18b2021-03-04 12:39:36 -07008675 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008676 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008677 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008678 if (ret)
8679 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008680 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8681 /* Can't have SQ_AFF without SQPOLL */
8682 ret = -EINVAL;
8683 goto err;
8684 }
8685
Jens Axboe2b188cc2019-01-07 10:46:33 -07008686 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008687err_sqpoll:
8688 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008689err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008690 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008691 return ret;
8692}
8693
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008694static inline void __io_unaccount_mem(struct user_struct *user,
8695 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008696{
8697 atomic_long_sub(nr_pages, &user->locked_vm);
8698}
8699
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008700static inline int __io_account_mem(struct user_struct *user,
8701 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008702{
8703 unsigned long page_limit, cur_pages, new_pages;
8704
8705 /* Don't allow more pages than we can safely lock */
8706 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8707
8708 do {
8709 cur_pages = atomic_long_read(&user->locked_vm);
8710 new_pages = cur_pages + nr_pages;
8711 if (new_pages > page_limit)
8712 return -ENOMEM;
8713 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8714 new_pages) != cur_pages);
8715
8716 return 0;
8717}
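/*
 * The cmpxchg loop above is a lock-free "add unless it would exceed the
 * limit": e.g. with RLIMIT_MEMLOCK at 64 KiB (16 pages of 4 KiB) and 10
 * pages already charged, charging 8 more fails with -ENOMEM, while two
 * racing 3-page charges can both still succeed.
 */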
8718
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008719static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008720{
Jens Axboe62e398b2021-02-21 16:19:37 -07008721 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008722 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008723
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008724 if (ctx->mm_account)
8725 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008726}
8727
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008728static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008729{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008730 int ret;
8731
Jens Axboe62e398b2021-02-21 16:19:37 -07008732 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008733 ret = __io_account_mem(ctx->user, nr_pages);
8734 if (ret)
8735 return ret;
8736 }
8737
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008738 if (ctx->mm_account)
8739 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008740
8741 return 0;
8742}
8743
Jens Axboe2b188cc2019-01-07 10:46:33 -07008744static void io_mem_free(void *ptr)
8745{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008746 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008747
Mark Rutland52e04ef2019-04-30 17:30:21 +01008748 if (!ptr)
8749 return;
8750
8751 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008752 if (put_page_testzero(page))
8753 free_compound_page(page);
8754}
8755
8756static void *io_mem_alloc(size_t size)
8757{
Shakeel Butt246dfbc2022-01-24 21:17:36 -08008758 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008759
Shakeel Butt246dfbc2022-01-24 21:17:36 -08008760 return (void *) __get_free_pages(gfp, get_order(size));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008761}
8762
Hristo Venev75b28af2019-08-26 17:23:46 +00008763static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8764 size_t *sq_offset)
8765{
8766 struct io_rings *rings;
8767 size_t off, sq_array_size;
8768
8769 off = struct_size(rings, cqes, cq_entries);
8770 if (off == SIZE_MAX)
8771 return SIZE_MAX;
8772
8773#ifdef CONFIG_SMP
8774 off = ALIGN(off, SMP_CACHE_BYTES);
8775 if (off == 0)
8776 return SIZE_MAX;
8777#endif
8778
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008779 if (sq_offset)
8780 *sq_offset = off;
8781
Hristo Venev75b28af2019-08-26 17:23:46 +00008782 sq_array_size = array_size(sizeof(u32), sq_entries);
8783 if (sq_array_size == SIZE_MAX)
8784 return SIZE_MAX;
8785
8786 if (check_add_overflow(off, sq_array_size, &off))
8787 return SIZE_MAX;
8788
Hristo Venev75b28af2019-08-26 17:23:46 +00008789 return off;
8790}
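/*
 * The rings allocation sized above is laid out, in offset order, as:
 *
 *	struct io_rings | cqes[cq_entries] | pad to SMP_CACHE_BYTES | __u32 sq_array[sq_entries]
 *
 * with *sq_offset reporting where the SQ index array starts.
 */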
8791
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008792static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008793{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008794 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008795 unsigned int i;
8796
Pavel Begunkov62248432021-04-28 13:11:29 +01008797 if (imu != ctx->dummy_ubuf) {
8798 for (i = 0; i < imu->nr_bvecs; i++)
8799 unpin_user_page(imu->bvec[i].bv_page);
8800 if (imu->acct_pages)
8801 io_unaccount_mem(ctx, imu->acct_pages);
8802 kvfree(imu);
8803 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008804 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008805}
8806
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008807static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8808{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008809 io_buffer_unmap(ctx, &prsrc->buf);
8810 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008811}
8812
8813static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008814{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008815 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008816
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008817 for (i = 0; i < ctx->nr_user_bufs; i++)
8818 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008819 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08008820 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07008821 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008822 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008823 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008824}
8825
Jens Axboeedafcce2019-01-09 09:16:05 -07008826static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8827{
Pavel Begunkov91f5a602022-06-13 06:30:06 +01008828 unsigned nr = ctx->nr_user_bufs;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008829 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008830
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008831 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07008832 return -ENXIO;
8833
Pavel Begunkov91f5a602022-06-13 06:30:06 +01008834 /*
8835	 * Quiesce may unlock ->uring_lock; while it's not held,
8836	 * prevent new requests from using the table.
8837 */
8838 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008839 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
Pavel Begunkov91f5a602022-06-13 06:30:06 +01008840 ctx->nr_user_bufs = nr;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008841 if (!ret)
8842 __io_sqe_buffers_unregister(ctx);
8843 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008844}
8845
8846static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8847 void __user *arg, unsigned index)
8848{
8849 struct iovec __user *src;
8850
8851#ifdef CONFIG_COMPAT
8852 if (ctx->compat) {
8853 struct compat_iovec __user *ciovs;
8854 struct compat_iovec ciov;
8855
8856 ciovs = (struct compat_iovec __user *) arg;
8857 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8858 return -EFAULT;
8859
Jens Axboed55e5f52019-12-11 16:12:15 -07008860 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008861 dst->iov_len = ciov.iov_len;
8862 return 0;
8863 }
8864#endif
8865 src = (struct iovec __user *) arg;
8866 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8867 return -EFAULT;
8868 return 0;
8869}
8870
Jens Axboede293932020-09-17 16:19:16 -06008871/*
8872 * Not super efficient, but this only runs at registration time. And we do cache
8873 * the last compound head, so generally we'll only do a full search if we don't
8874 * match that one.
8875 *
8876 * We check if the given compound head page has already been accounted, to
8877 * avoid double accounting it. This allows us to account the full size of the
8878 * page, not just the constituent pages of a huge page.
8879 */
8880static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8881 int nr_pages, struct page *hpage)
8882{
8883 int i, j;
8884
8885 /* check current page array */
8886 for (i = 0; i < nr_pages; i++) {
8887 if (!PageCompound(pages[i]))
8888 continue;
8889 if (compound_head(pages[i]) == hpage)
8890 return true;
8891 }
8892
8893 /* check previously registered pages */
8894 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008895 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06008896
8897 for (j = 0; j < imu->nr_bvecs; j++) {
8898 if (!PageCompound(imu->bvec[j].bv_page))
8899 continue;
8900 if (compound_head(imu->bvec[j].bv_page) == hpage)
8901 return true;
8902 }
8903 }
8904
8905 return false;
8906}
8907
8908static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8909 int nr_pages, struct io_mapped_ubuf *imu,
8910 struct page **last_hpage)
8911{
8912 int i, ret;
8913
Pavel Begunkov216e5832021-05-29 12:01:02 +01008914 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06008915 for (i = 0; i < nr_pages; i++) {
8916 if (!PageCompound(pages[i])) {
8917 imu->acct_pages++;
8918 } else {
8919 struct page *hpage;
8920
8921 hpage = compound_head(pages[i]);
8922 if (hpage == *last_hpage)
8923 continue;
8924 *last_hpage = hpage;
8925 if (headpage_already_acct(ctx, pages, i, hpage))
8926 continue;
8927 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8928 }
8929 }
8930
8931 if (!imu->acct_pages)
8932 return 0;
8933
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008934 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008935 if (ret)
8936 imu->acct_pages = 0;
8937 return ret;
8938}
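/*
 * Worked example: with 4 KiB base pages, a buffer sitting in one 2 MiB huge
 * page is charged once as page_size(hpage) >> PAGE_SHIFT == 512 pages, not
 * once per constituent page, and headpage_already_acct() keeps a second
 * buffer in the same huge page from being charged again.
 */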
8939
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008940static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008941 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008942 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008943{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008944 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008945 struct vm_area_struct **vmas = NULL;
8946 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008947 unsigned long off, start, end, ubuf;
8948 size_t size;
8949 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008950
Pavel Begunkov62248432021-04-28 13:11:29 +01008951 if (!iov->iov_base) {
8952 *pimu = ctx->dummy_ubuf;
8953 return 0;
8954 }
8955
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008956 ubuf = (unsigned long) iov->iov_base;
8957 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8958 start = ubuf >> PAGE_SHIFT;
8959 nr_pages = end - start;
8960
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008961 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008962 ret = -ENOMEM;
8963
8964 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8965 if (!pages)
8966 goto done;
8967
8968 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8969 GFP_KERNEL);
8970 if (!vmas)
8971 goto done;
8972
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008973 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01008974 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008975 goto done;
8976
8977 ret = 0;
8978 mmap_read_lock(current->mm);
8979 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8980 pages, vmas);
8981 if (pret == nr_pages) {
8982 /* don't support file backed memory */
8983 for (i = 0; i < nr_pages; i++) {
8984 struct vm_area_struct *vma = vmas[i];
8985
Pavel Begunkov40dad762021-06-09 15:26:54 +01008986 if (vma_is_shmem(vma))
8987 continue;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008988 if (vma->vm_file &&
8989 !is_file_hugepages(vma->vm_file)) {
8990 ret = -EOPNOTSUPP;
8991 break;
8992 }
8993 }
8994 } else {
8995 ret = pret < 0 ? pret : -EFAULT;
8996 }
8997 mmap_read_unlock(current->mm);
8998 if (ret) {
8999 /*
9000		 * if we did a partial map, or found file-backed vmas,
9001 * release any pages we did get
9002 */
9003 if (pret > 0)
9004 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009005 goto done;
9006 }
9007
9008 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9009 if (ret) {
9010 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009011 goto done;
9012 }
9013
9014 off = ubuf & ~PAGE_MASK;
9015 size = iov->iov_len;
9016 for (i = 0; i < nr_pages; i++) {
9017 size_t vec_len;
9018
9019 vec_len = min_t(size_t, size, PAGE_SIZE - off);
9020 imu->bvec[i].bv_page = pages[i];
9021 imu->bvec[i].bv_len = vec_len;
9022 imu->bvec[i].bv_offset = off;
9023 off = 0;
9024 size -= vec_len;
9025 }
9026 /* store original address for later verification */
9027 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01009028 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009029 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009030 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009031 ret = 0;
9032done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009033 if (ret)
9034 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009035 kvfree(pages);
9036 kvfree(vmas);
9037 return ret;
9038}
9039
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009040static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009041{
Pavel Begunkov87094462021-04-11 01:46:36 +01009042 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
9043 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009044}
9045
9046static int io_buffer_validate(struct iovec *iov)
9047{
Pavel Begunkov50e96982021-03-24 22:59:01 +00009048 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
9049
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009050 /*
9051 * Don't impose further limits on the size and buffer
9052	 * constraints here; we'll return -EINVAL later when IO is
9053 * submitted if they are wrong.
9054 */
Pavel Begunkov62248432021-04-28 13:11:29 +01009055 if (!iov->iov_base)
9056 return iov->iov_len ? -EFAULT : 0;
9057 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009058 return -EFAULT;
9059
9060 /* arbitrary limit, but we need something */
9061 if (iov->iov_len > SZ_1G)
9062 return -EFAULT;
9063
Pavel Begunkov50e96982021-03-24 22:59:01 +00009064 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
9065 return -EOVERFLOW;
9066
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009067 return 0;
9068}
9069
9070static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009071 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009072{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009073 struct page *last_hpage = NULL;
9074 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009075 int i, ret;
9076 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009077
Pavel Begunkov87094462021-04-11 01:46:36 +01009078 if (ctx->user_bufs)
9079 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01009080 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01009081 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009082 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009083 if (ret)
9084 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01009085 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9086 if (ret)
9087 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009088 ret = io_buffers_map_alloc(ctx, nr_args);
9089 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08009090 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009091 return ret;
9092 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009093
Pavel Begunkov87094462021-04-11 01:46:36 +01009094 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07009095 ret = io_copy_iov(ctx, &iov, arg, i);
9096 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009097 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009098 ret = io_buffer_validate(&iov);
9099 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009100 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01009101 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01009102 ret = -EINVAL;
9103 break;
9104 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009105
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009106 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9107 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009108 if (ret)
9109 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009110 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009111
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009112 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009113
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009114 ctx->buf_data = data;
9115 if (ret)
9116 __io_sqe_buffers_unregister(ctx);
9117 else
9118 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07009119 return ret;
9120}
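/*
 * Userspace sketch (raw syscall, error handling omitted): pin one 4 KiB
 * buffer for IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED use:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * A NULL iov_base with zero length reserves a sparse slot backed by
 * dummy_ubuf.
 */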
9121
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009122static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
9123 struct io_uring_rsrc_update2 *up,
9124 unsigned int nr_args)
9125{
9126 u64 __user *tags = u64_to_user_ptr(up->tags);
9127 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009128 struct page *last_hpage = NULL;
9129 bool needs_switch = false;
9130 __u32 done;
9131 int i, err;
9132
9133 if (!ctx->buf_data)
9134 return -ENXIO;
9135 if (up->offset + nr_args > ctx->nr_user_bufs)
9136 return -EINVAL;
9137
9138 for (done = 0; done < nr_args; done++) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009139 struct io_mapped_ubuf *imu;
9140 int offset = up->offset + done;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009141 u64 tag = 0;
9142
9143 err = io_copy_iov(ctx, &iov, iovs, done);
9144 if (err)
9145 break;
9146 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
9147 err = -EFAULT;
9148 break;
9149 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009150 err = io_buffer_validate(&iov);
9151 if (err)
9152 break;
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01009153 if (!iov.iov_base && tag) {
9154 err = -EINVAL;
9155 break;
9156 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009157 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
9158 if (err)
9159 break;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009160
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009161 i = array_index_nospec(offset, ctx->nr_user_bufs);
Pavel Begunkov62248432021-04-28 13:11:29 +01009162 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01009163 err = io_queue_rsrc_removal(ctx->buf_data, i,
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009164 ctx->rsrc_node, ctx->user_bufs[i]);
9165 if (unlikely(err)) {
9166 io_buffer_unmap(ctx, &imu);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009167 break;
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009168 }
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009169 ctx->user_bufs[i] = NULL;
9170 needs_switch = true;
9171 }
9172
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009173 ctx->user_bufs[i] = imu;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01009174 *io_get_tag_slot(ctx->buf_data, offset) = tag;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009175 }
9176
9177 if (needs_switch)
9178 io_rsrc_node_switch(ctx, ctx->buf_data);
9179 return done ? done : err;
9180}
9181
Jens Axboe9b402842019-04-11 11:45:41 -06009182static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
9183{
9184 __s32 __user *fds = arg;
9185 int fd;
9186
9187 if (ctx->cq_ev_fd)
9188 return -EBUSY;
9189
9190 if (copy_from_user(&fd, fds, sizeof(*fds)))
9191 return -EFAULT;
9192
9193 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
9194 if (IS_ERR(ctx->cq_ev_fd)) {
9195 int ret = PTR_ERR(ctx->cq_ev_fd);
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01009196
Jens Axboe9b402842019-04-11 11:45:41 -06009197 ctx->cq_ev_fd = NULL;
9198 return ret;
9199 }
9200
9201 return 0;
9202}
9203
9204static int io_eventfd_unregister(struct io_ring_ctx *ctx)
9205{
9206 if (ctx->cq_ev_fd) {
9207 eventfd_ctx_put(ctx->cq_ev_fd);
9208 ctx->cq_ev_fd = NULL;
9209 return 0;
9210 }
9211
9212 return -ENXIO;
9213}
9214
Jens Axboe5a2e7452020-02-23 16:23:11 -07009215static void io_destroy_buffers(struct io_ring_ctx *ctx)
9216{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07009217 struct io_buffer *buf;
9218 unsigned long index;
9219
Ye Bin2d447d32021-11-22 10:47:37 +08009220 xa_for_each(&ctx->io_buffers, index, buf)
Jens Axboe9e15c3a2021-03-13 12:29:43 -07009221 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07009222}
9223
Pavel Begunkov72558342021-08-09 20:18:09 +01009224static void io_req_cache_free(struct list_head *list)
Jens Axboe1b4c3512021-02-10 00:03:19 +00009225{
Jens Axboe68e68ee2021-02-13 09:00:02 -07009226 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00009227
Pavel Begunkovbb943b82021-08-09 20:18:10 +01009228 list_for_each_entry_safe(req, nxt, list, inflight_entry) {
9229 list_del(&req->inflight_entry);
Jens Axboe1b4c3512021-02-10 00:03:19 +00009230 kmem_cache_free(req_cachep, req);
9231 }
9232}
9233
Jens Axboe4010fec2021-02-27 15:04:18 -07009234static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009235{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01009236 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00009237
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07009238 mutex_lock(&ctx->uring_lock);
9239
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01009240 if (state->free_reqs) {
9241 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9242 state->free_reqs = 0;
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00009243 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07009244
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01009245 io_flush_cached_locked_reqs(ctx, state);
9246 io_req_cache_free(&state->free_list);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07009247 mutex_unlock(&ctx->uring_lock);
9248}
9249
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009250static void io_wait_rsrc_data(struct io_rsrc_data *data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009251{
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009252 if (data && !atomic_dec_and_test(&data->refs))
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009253 wait_for_completion(&data->done);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009254}
9255
Jens Axboe2b188cc2019-01-07 10:46:33 -07009256static void io_ring_ctx_free(struct io_ring_ctx *ctx)
9257{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07009258 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009259
Jens Axboe37d1e2e2021-02-17 21:03:43 -07009260 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06009261 mmdrop(ctx->mm_account);
9262 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07009263 }
Jens Axboedef596e2019-01-09 08:59:42 -07009264
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009265 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9266 io_wait_rsrc_data(ctx->buf_data);
9267 io_wait_rsrc_data(ctx->file_data);
9268
Hao Xu8bad28d2021-02-19 17:19:36 +08009269 mutex_lock(&ctx->uring_lock);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009270 if (ctx->buf_data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009271 __io_sqe_buffers_unregister(ctx);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009272 if (ctx->file_data)
Pavel Begunkov08480402021-04-13 02:58:38 +01009273 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01009274 if (ctx->rings)
9275 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08009276 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06009277 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07009278 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01009279 if (ctx->sq_creds)
9280 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07009281
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01009282 /* there are no registered resources left, nobody uses it */
9283 if (ctx->rsrc_node)
9284 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00009285 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01009286 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01009287 flush_delayed_work(&ctx->rsrc_put_work);
9288
9289 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
9290 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009291
9292#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07009293 if (ctx->ring_sock) {
9294 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07009295 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07009296 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009297#endif
Pavel Begunkovef9dd632021-08-28 19:54:38 -06009298 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009299
Hristo Venev75b28af2019-08-26 17:23:46 +00009300 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009301 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009302
9303 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009304 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07009305 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07009306 if (ctx->hash_map)
9307 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07009308 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01009309 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009310 kfree(ctx);
9311}
9312
9313static __poll_t io_uring_poll(struct file *file, poll_table *wait)
9314{
9315 struct io_ring_ctx *ctx = file->private_data;
9316 __poll_t mask = 0;
9317
Pavel Begunkov311997b2021-06-14 23:37:28 +01009318 poll_wait(file, &ctx->poll_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02009319 /*
9320 * synchronizes with barrier from wq_has_sleeper call in
9321 * io_commit_cqring
9322 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07009323 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06009324 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009325 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08009326
9327 /*
9328 * Don't flush cqring overflow list here, just do a simple check.
9329	 * Otherwise there could possibly be an ABBA deadlock:
9330 * CPU0 CPU1
9331 * ---- ----
9332 * lock(&ctx->uring_lock);
9333 * lock(&ep->mtx);
9334 * lock(&ctx->uring_lock);
9335 * lock(&ep->mtx);
9336 *
9337	 * Users may get EPOLLIN while seeing nothing in the CQ ring; this
9338	 * pushes them to do the flush.
9339 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01009340 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009341 mask |= EPOLLIN | EPOLLRDNORM;
9342
9343 return mask;
9344}
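
/*
 * Illustrative userspace sketch (not part of this file) of what the
 * overflow note above asks of applications: EPOLLIN on the ring fd may
 * arrive while the CQ ring itself looks empty, in which case entering
 * the kernel with IORING_ENTER_GETEVENTS triggers the flush. ring_fd,
 * cq_head and cq_tail are assumed to come from a prior io_uring_setup()
 * and mmap of the CQ ring.
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		if (__atomic_load_n(cq_tail, __ATOMIC_ACQUIRE) == *cq_head)
 *			syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *				IORING_ENTER_GETEVENTS, NULL, 0);
 *	}
 */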
9345
Yejune Deng0bead8c2020-12-24 11:02:20 +08009346static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07009347{
Jens Axboe4379bf82021-02-15 13:40:22 -07009348 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07009349
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009350 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07009351 if (creds) {
9352 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08009353 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009354 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08009355
9356 return -EINVAL;
9357}
9358
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009359struct io_tctx_exit {
9360 struct callback_head task_work;
9361 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009362 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009363};
9364
9365static void io_tctx_exit_cb(struct callback_head *cb)
9366{
9367 struct io_uring_task *tctx = current->io_uring;
9368 struct io_tctx_exit *work;
9369
9370 work = container_of(cb, struct io_tctx_exit, task_work);
9371 /*
9372 * When @in_idle, we're in cancellation and it's racy to remove the
9373	 * node. It'll be removed by the end of cancellation; just ignore it.
9374 */
9375 if (!atomic_read(&tctx->in_idle))
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009376 io_uring_del_tctx_node((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009377 complete(&work->completion);
9378}
9379
Pavel Begunkov28090c12021-04-25 23:34:45 +01009380static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
9381{
9382 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9383
9384 return req->ctx == data;
9385}
9386
Jens Axboe85faa7b2020-04-09 18:14:00 -06009387static void io_ring_exit_work(struct work_struct *work)
9388{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009389 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00009390 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkov58d3be22021-08-09 13:04:17 +01009391 unsigned long interval = HZ / 20;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009392 struct io_tctx_exit exit;
9393 struct io_tctx_node *node;
9394 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06009395
Jens Axboe56952e92020-06-17 15:00:04 -06009396 /*
9397 * If we're doing polled IO and end up having requests being
9398 * submitted async (out-of-line), then completions can come in while
9399 * we're waiting for refs to drop. We need to reap these manually,
9400 * as nobody else will be looking for them.
9401 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03009402 do {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009403 io_uring_try_cancel_requests(ctx, NULL, true);
Pavel Begunkov28090c12021-04-25 23:34:45 +01009404 if (ctx->sq_data) {
9405 struct io_sq_data *sqd = ctx->sq_data;
9406 struct task_struct *tsk;
9407
9408 io_sq_thread_park(sqd);
9409 tsk = sqd->thread;
9410 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
9411 io_wq_cancel_cb(tsk->io_uring->io_wq,
9412 io_cancel_ctx_cb, ctx, true);
9413 io_sq_thread_unpark(sqd);
9414 }
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00009415
Pavel Begunkov58d3be22021-08-09 13:04:17 +01009416 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
9417 /* there is little hope left, don't run it too often */
9418 interval = HZ * 60;
9419 }
9420 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009421
Pavel Begunkov7f006512021-04-14 13:38:34 +01009422 init_completion(&exit.completion);
9423 init_task_work(&exit.task_work, io_tctx_exit_cb);
9424 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01009425 /*
9426	 * Some may use the context even when all refs and requests have been put,
9427 * and they are free to do so while still holding uring_lock or
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01009428 * completion_lock, see io_req_task_submit(). Apart from other work,
Pavel Begunkov89b50662021-04-01 15:43:50 +01009429	 * this lock/unlock section also waits for them to finish.
9430 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009431 mutex_lock(&ctx->uring_lock);
9432 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00009433 WARN_ON_ONCE(time_after(jiffies, timeout));
9434
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009435 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
9436 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01009437 /* don't spin on a single task if cancellation failed */
9438 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009439 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
9440 if (WARN_ON_ONCE(ret))
9441 continue;
9442 wake_up_process(node->task);
9443
9444 mutex_unlock(&ctx->uring_lock);
9445 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009446 mutex_lock(&ctx->uring_lock);
9447 }
9448 mutex_unlock(&ctx->uring_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06009449 spin_lock(&ctx->completion_lock);
9450 spin_unlock(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009451
Jens Axboe85faa7b2020-04-09 18:14:00 -06009452 io_ring_ctx_free(ctx);
9453}
9454
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009455/* Returns true if we found and killed one or more timeouts */
9456static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009457 bool cancel_all)
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009458{
9459 struct io_kiocb *req, *tmp;
9460 int canceled = 0;
9461
Jens Axboe79ebeae2021-08-10 15:18:27 -06009462 spin_lock(&ctx->completion_lock);
9463 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009464 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009465 if (io_match_task(req, tsk, cancel_all)) {
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009466 io_kill_timeout(req, -ECANCELED);
9467 canceled++;
9468 }
9469 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06009470 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov51520422021-03-29 11:39:29 +01009471 if (canceled != 0)
9472 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06009473 spin_unlock(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009474 if (canceled != 0)
9475 io_cqring_ev_posted(ctx);
9476 return canceled != 0;
9477}
9478
Jens Axboe2b188cc2019-01-07 10:46:33 -07009479static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
9480{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009481 unsigned long index;
9482 struct creds *creds;
9483
Jens Axboe2b188cc2019-01-07 10:46:33 -07009484 mutex_lock(&ctx->uring_lock);
9485 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00009486 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009487 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009488 xa_for_each(&ctx->personalities, index, creds)
9489 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009490 mutex_unlock(&ctx->uring_lock);
9491
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009492 io_kill_timeouts(ctx, NULL, true);
9493 io_poll_remove_all(ctx, NULL, true);
Jens Axboe561fb042019-10-24 07:25:42 -06009494
Jens Axboe15dff282019-11-13 09:09:23 -07009495 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03009496 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06009497
Jens Axboe85faa7b2020-04-09 18:14:00 -06009498 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06009499 /*
9500 * Use system_unbound_wq to avoid spawning tons of event kworkers
9501 * if we're exiting a ton of rings at the same time. It just adds
9502	 * noise and overhead; there's no discernible change in runtime
9503 * over using system_wq.
9504 */
9505 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009506}
9507
9508static int io_uring_release(struct inode *inode, struct file *file)
9509{
9510 struct io_ring_ctx *ctx = file->private_data;
9511
9512 file->private_data = NULL;
9513 io_ring_ctx_wait_and_kill(ctx);
9514 return 0;
9515}
9516
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009517struct io_task_cancel {
9518 struct task_struct *task;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009519 bool all;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009520};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03009521
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009522static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07009523{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00009524 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009525 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00009526
Pavel Begunkov1c939a52021-11-26 14:38:15 +00009527 return io_match_task_safe(req, cancel->task, cancel->all);
Jens Axboeb711d4e2020-08-16 08:23:05 -07009528}
9529
Pavel Begunkove1915f72021-03-11 23:29:35 +00009530static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009531 struct task_struct *task, bool cancel_all)
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009532{
Pavel Begunkove1915f72021-03-11 23:29:35 +00009533 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009534 LIST_HEAD(list);
9535
Jens Axboe79ebeae2021-08-10 15:18:27 -06009536 spin_lock(&ctx->completion_lock);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009537 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov1c939a52021-11-26 14:38:15 +00009538 if (io_match_task_safe(de->req, task, cancel_all)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009539 list_cut_position(&list, &ctx->defer_list, &de->list);
9540 break;
9541 }
9542 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06009543 spin_unlock(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00009544 if (list_empty(&list))
9545 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009546
9547 while (!list_empty(&list)) {
9548 de = list_first_entry(&list, struct io_defer_entry, list);
9549 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00009550 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009551 kfree(de);
9552 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00009553 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009554}
9555
Pavel Begunkov1b007642021-03-06 11:02:17 +00009556static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
9557{
9558 struct io_tctx_node *node;
9559 enum io_wq_cancel cret;
9560 bool ret = false;
9561
9562 mutex_lock(&ctx->uring_lock);
9563 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
9564 struct io_uring_task *tctx = node->task->io_uring;
9565
9566 /*
9567 * io_wq will stay alive while we hold uring_lock, because it's
9568	 * killed after ctx nodes, which requires taking the lock.
9569 */
9570 if (!tctx || !tctx->io_wq)
9571 continue;
9572 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
9573 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9574 }
9575 mutex_unlock(&ctx->uring_lock);
9576
9577 return ret;
9578}
9579
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009580static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9581 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009582 bool cancel_all)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009583{
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009584 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00009585 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009586
9587 while (1) {
9588 enum io_wq_cancel cret;
9589 bool ret = false;
9590
Pavel Begunkov1b007642021-03-06 11:02:17 +00009591 if (!task) {
9592 ret |= io_uring_try_cancel_iowq(ctx);
9593 } else if (tctx && tctx->io_wq) {
9594 /*
9595 * Cancels requests of all rings, not only @ctx, but
9596 * it's fine as the task is in exit/exec.
9597 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009598 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009599 &cancel, true);
9600 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9601 }
9602
9603 /* SQPOLL thread does its own polling */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009604 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
Jens Axboed052d1d2021-03-11 10:49:20 -07009605 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009606 while (!list_empty_careful(&ctx->iopoll_list)) {
9607 io_iopoll_try_reap_events(ctx);
9608 ret = true;
9609 }
9610 }
9611
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009612 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9613 ret |= io_poll_remove_all(ctx, task, cancel_all);
9614 ret |= io_kill_timeouts(ctx, task, cancel_all);
Pavel Begunkove5dc4802021-06-26 21:40:46 +01009615 if (task)
9616 ret |= io_run_task_work();
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009617 if (!ret)
9618 break;
9619 cond_resched();
9620 }
9621}
9622
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009623static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009624{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009625 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009626 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00009627 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009628
9629 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009630 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009631 if (unlikely(ret))
9632 return ret;
Pavel Begunkove139a1e2021-10-19 23:43:46 +01009633
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009634 tctx = current->io_uring;
Pavel Begunkove139a1e2021-10-19 23:43:46 +01009635 if (ctx->iowq_limits_set) {
9636 unsigned int limits[2] = { ctx->iowq_limits[0],
9637 ctx->iowq_limits[1], };
9638
9639 ret = io_wq_max_workers(tctx->io_wq, limits);
9640 if (ret)
9641 return ret;
9642 }
Jens Axboe0f212202020-09-13 13:09:39 -06009643 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009644 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9645 node = kmalloc(sizeof(*node), GFP_KERNEL);
9646 if (!node)
9647 return -ENOMEM;
9648 node->ctx = ctx;
9649 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06009650
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009651 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9652 node, GFP_KERNEL));
9653 if (ret) {
9654 kfree(node);
9655 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06009656 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009657
9658 mutex_lock(&ctx->uring_lock);
9659 list_add(&node->ctx_node, &ctx->tctx_list);
9660 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009661 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009662 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06009663 return 0;
9664}
9665
9666/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009667 * Note that this task has used io_uring. We use it for cancellation purposes.
9668 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009669static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009670{
9671 struct io_uring_task *tctx = current->io_uring;
9672
9673 if (likely(tctx && tctx->last == ctx))
9674 return 0;
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009675 return __io_uring_add_tctx_node(ctx);
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009676}
9677
9678/*
Jens Axboe0f212202020-09-13 13:09:39 -06009679 * Remove this io_uring_file -> task mapping.
9680 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009681static void io_uring_del_tctx_node(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06009682{
9683 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009684 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00009685
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00009686 if (!tctx)
9687 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009688 node = xa_erase(&tctx->xa, index);
9689 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00009690 return;
Jens Axboe0f212202020-09-13 13:09:39 -06009691
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009692 WARN_ON_ONCE(current != node->task);
9693 WARN_ON_ONCE(list_empty(&node->ctx_node));
9694
9695 mutex_lock(&node->ctx->uring_lock);
9696 list_del(&node->ctx_node);
9697 mutex_unlock(&node->ctx->uring_lock);
9698
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009699 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009700 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009701 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06009702}
9703
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009704static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009705{
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009706 struct io_wq *wq = tctx->io_wq;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009707 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009708 unsigned long index;
9709
Jens Axboe8bab4c02021-09-24 07:12:27 -06009710 xa_for_each(&tctx->xa, index, node) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009711 io_uring_del_tctx_node(index);
Jens Axboe8bab4c02021-09-24 07:12:27 -06009712 cond_resched();
9713 }
Marco Elverb16ef422021-05-27 11:25:48 +02009714 if (wq) {
9715 /*
9716	 * Must be after io_uring_del_tctx_node() (removes nodes under
9717 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9718 */
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009719 io_wq_put_and_exit(wq);
Pavel Begunkovdadebc32021-08-23 13:30:44 +01009720 tctx->io_wq = NULL;
Marco Elverb16ef422021-05-27 11:25:48 +02009721 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009722}
9723
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009724static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009725{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009726 if (tracked)
9727 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009728 return percpu_counter_sum(&tctx->inflight);
9729}
9730
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009731/*
9732 * Find any io_uring ctx that this task has registered or done IO on, and cancel
Jens Axboe8e129762021-12-09 08:54:29 -07009733 * requests. @sqd must be non-NULL if and only if it's an SQPOLL thread cancellation.
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009734 */
9735static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009736{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009737 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov734551d2021-04-18 14:52:09 +01009738 struct io_ring_ctx *ctx;
Jens Axboefdaf0832020-10-30 09:37:30 -06009739 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009740 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009741
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009742 WARN_ON_ONCE(sqd && sqd->thread != current);
9743
Palash Oswal6d042ff2021-04-27 18:21:49 +05309744 if (!current->io_uring)
9745 return;
Pavel Begunkov17a91052021-05-23 15:48:39 +01009746 if (tctx->io_wq)
9747 io_wq_exit_start(tctx->io_wq);
9748
Jens Axboefdaf0832020-10-30 09:37:30 -06009749 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -06009750 do {
Pavel Begunkove9dbe222021-08-09 13:04:20 +01009751 io_uring_drop_tctx_refs(current);
Jens Axboe0f212202020-09-13 13:09:39 -06009752 /* read completions before cancelations */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009753 inflight = tctx_inflight(tctx, !cancel_all);
Jens Axboed8a6df12020-10-15 16:24:45 -06009754 if (!inflight)
9755 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009756
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009757 if (!sqd) {
9758 struct io_tctx_node *node;
9759 unsigned long index;
9760
9761 xa_for_each(&tctx->xa, index, node) {
9762 /* sqpoll task will cancel all its requests */
9763 if (node->ctx->sq_data)
9764 continue;
9765 io_uring_try_cancel_requests(node->ctx, current,
9766 cancel_all);
9767 }
9768 } else {
9769 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9770 io_uring_try_cancel_requests(ctx, current,
9771 cancel_all);
9772 }
9773
Jens Axboe8e129762021-12-09 08:54:29 -07009774 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
9775 io_run_task_work();
Pavel Begunkove9dbe222021-08-09 13:04:20 +01009776 io_uring_drop_tctx_refs(current);
Jens Axboe8e129762021-12-09 08:54:29 -07009777
Jens Axboe0f212202020-09-13 13:09:39 -06009778 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009779 * If we've seen completions, retry without waiting. This
9780 * avoids a race where a completion comes in before we did
9781 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009782 */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009783 if (inflight == tctx_inflight(tctx, !cancel_all))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009784 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009785 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009786 } while (1);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009787
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009788 io_uring_clean_tctx(tctx);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009789 if (cancel_all) {
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00009790 /*
9791 * We shouldn't run task_works after cancel, so just leave
9792 * ->in_idle set for normal exit.
9793 */
9794 atomic_dec(&tctx->in_idle);
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009795 /* for exec all current's requests should be gone, kill tctx */
9796 __io_uring_free(current);
9797 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009798}
9799
Hao Xuf552a272021-08-12 12:14:35 +08009800void __io_uring_cancel(bool cancel_all)
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009801{
Hao Xuf552a272021-08-12 12:14:35 +08009802 io_uring_cancel_generic(cancel_all, NULL);
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009803}
9804
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009805static void *io_uring_validate_mmap_request(struct file *file,
9806 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009807{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009808 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009809 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009810 struct page *page;
9811 void *ptr;
9812
9813 switch (offset) {
9814 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009815 case IORING_OFF_CQ_RING:
9816 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009817 break;
9818 case IORING_OFF_SQES:
9819 ptr = ctx->sq_sqes;
9820 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009821 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009822 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009823 }
9824
9825 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009826 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009827 return ERR_PTR(-EINVAL);
9828
9829 return ptr;
9830}
9831
9832#ifdef CONFIG_MMU
9833
9834static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9835{
9836 size_t sz = vma->vm_end - vma->vm_start;
9837 unsigned long pfn;
9838 void *ptr;
9839
9840 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9841 if (IS_ERR(ptr))
9842 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009843
9844 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9845 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9846}
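
/*
 * For reference, a hedged userspace sketch of the mmap protocol served
 * above; "p" is assumed to be the io_uring_params filled in by a prior
 * io_uring_setup() on "ring_fd":
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes +
 *		       p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq = mmap(0, sq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, ring_fd,
 *			IORING_OFF_SQ_RING);
 *	void *sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE,
 *			  MAP_SHARED | MAP_POPULATE, ring_fd,
 *			  IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP set in p.features, the CQ ring shares the
 * first mapping (sized max(sq_sz, cq_sz)); otherwise it is mapped
 * separately at IORING_OFF_CQ_RING.
 */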
9847
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009848#else /* !CONFIG_MMU */
9849
9850static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9851{
9852 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9853}
9854
9855static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9856{
9857 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9858}
9859
9860static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9861 unsigned long addr, unsigned long len,
9862 unsigned long pgoff, unsigned long flags)
9863{
9864 void *ptr;
9865
9866 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9867 if (IS_ERR(ptr))
9868 return PTR_ERR(ptr);
9869
9870 return (unsigned long) ptr;
9871}
9872
9873#endif /* !CONFIG_MMU */
9874
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009875static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009876{
9877 DEFINE_WAIT(wait);
9878
9879 do {
9880 if (!io_sqring_full(ctx))
9881 break;
Jens Axboe90554202020-09-03 12:12:41 -06009882 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9883
9884 if (!io_sqring_full(ctx))
9885 break;
Jens Axboe90554202020-09-03 12:12:41 -06009886 schedule();
9887 } while (!signal_pending(current));
9888
9889 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009890 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009891}
9892
Hao Xuc73ebb62020-11-03 10:54:37 +08009893static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9894 struct __kernel_timespec __user **ts,
9895 const sigset_t __user **sig)
9896{
9897 struct io_uring_getevents_arg arg;
9898
9899 /*
9900 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9901 * is just a pointer to the sigset_t.
9902 */
9903 if (!(flags & IORING_ENTER_EXT_ARG)) {
9904 *sig = (const sigset_t __user *) argp;
9905 *ts = NULL;
9906 return 0;
9907 }
9908
9909 /*
9910 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9911 * timespec and sigset_t pointers if good.
9912 */
9913 if (*argsz != sizeof(arg))
9914 return -EINVAL;
9915 if (copy_from_user(&arg, argp, sizeof(arg)))
9916 return -EFAULT;
Dylan Yudaken99475482022-04-12 09:30:42 -07009917 if (arg.pad)
9918 return -EINVAL;
Hao Xuc73ebb62020-11-03 10:54:37 +08009919 *sig = u64_to_user_ptr(arg.sigmask);
9920 *argsz = arg.sigmask_sz;
9921 *ts = u64_to_user_ptr(arg.ts);
9922 return 0;
9923}
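
/*
 * The userspace side of the convention parsed above, as a sketch: the
 * same argp carries either a bare sigset_t or, with IORING_ENTER_EXT_ARG,
 * a struct io_uring_getevents_arg bundling sigmask and timeout. ring_fd
 * is assumed to be an existing ring; a sigmask of 0 means no mask change.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= 0,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(uintptr_t)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */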
9924
Jens Axboe2b188cc2019-01-07 10:46:33 -07009925SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009926 u32, min_complete, u32, flags, const void __user *, argp,
9927 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009928{
9929 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009930 int submitted = 0;
9931 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009932 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009933
Jens Axboe4c6e2772020-07-01 11:29:10 -06009934 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009935
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009936 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9937 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009938 return -EINVAL;
9939
9940 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009941 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009942 return -EBADF;
9943
9944 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009945 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009946 goto out_fput;
9947
9948 ret = -ENXIO;
9949 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009950 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009951 goto out_fput;
9952
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009953 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009954 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009955 goto out;
9956
Jens Axboe6c271ce2019-01-10 11:22:30 -07009957 /*
9958 * For SQ polling, the thread will do all submissions and completions.
9959 * Just return the requested submit count, and wake the thread if
9960 * we were asked to.
9961 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009962 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009963 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov90f67362021-08-09 20:18:12 +01009964 io_cqring_overflow_flush(ctx);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009965
Jens Axboe21f96522021-08-14 09:04:40 -06009966 if (unlikely(ctx->sq_data->thread == NULL)) {
9967 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009968 goto out;
Jens Axboe21f96522021-08-14 09:04:40 -06009969 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009970 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009971 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009972 if (flags & IORING_ENTER_SQ_WAIT) {
9973 ret = io_sqpoll_wait_sq(ctx);
9974 if (ret)
9975 goto out;
9976 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009977 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009978 } else if (to_submit) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009979 ret = io_uring_add_tctx_node(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009980 if (unlikely(ret))
9981 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009982 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009983 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009984 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009985
9986 if (submitted != to_submit)
9987 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009988 }
9989 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009990 const sigset_t __user *sig;
9991 struct __kernel_timespec __user *ts;
9992
9993 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9994 if (unlikely(ret))
9995 goto out;
9996
Jens Axboe2b188cc2019-01-07 10:46:33 -07009997 min_complete = min(min_complete, ctx->cq_entries);
9998
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009999 /*
10000	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
10001	 * applications don't need to poll for completion events themselves;
10002	 * they can rely on io_sq_thread to do that polling, which reduces
10003	 * CPU usage and uring_lock contention.
10004 */
10005 if (ctx->flags & IORING_SETUP_IOPOLL &&
10006 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +030010007 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -070010008 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +080010009 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -070010010 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010011 }
10012
Pavel Begunkov7c504e652019-12-18 19:53:45 +030010013out:
Pavel Begunkov6805b322019-10-08 02:18:42 +030010014 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010015out_fput:
10016 fdput(f);
10017 return submitted ? submitted : ret;
10018}
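
/*
 * A sketch of the SQPOLL interplay handled above, from the application's
 * side; sq_flags is assumed to point at the mapped SQ ring flags word
 * (sq_off.flags) and ring_fd at an IORING_SETUP_SQPOLL ring. After
 * writing SQEs and publishing the new tail, the app only needs to enter
 * the kernel when the poll thread has gone to sleep:
 *
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 *	if (__atomic_load_n(sq_flags, __ATOMIC_RELAXED) &
 *	    IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *
 * IORING_ENTER_SQ_WAIT can similarly be used to block until SQ ring
 * space is available, per io_sqpoll_wait_sq().
 */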
10019
Tobias Klauserbebdb652020-02-26 18:38:32 +010010020#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010021static int io_uring_show_cred(struct seq_file *m, unsigned int id,
10022 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -070010023{
Jens Axboe87ce9552020-01-30 08:25:34 -070010024 struct user_namespace *uns = seq_user_ns(m);
10025 struct group_info *gi;
10026 kernel_cap_t cap;
10027 unsigned __capi;
10028 int g;
10029
10030 seq_printf(m, "%5d\n", id);
10031 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
10032 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
10033 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
10034 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
10035 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
10036 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
10037 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
10038 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
10039 seq_puts(m, "\n\tGroups:\t");
10040 gi = cred->group_info;
10041 for (g = 0; g < gi->ngroups; g++) {
10042 seq_put_decimal_ull(m, g ? " " : "",
10043 from_kgid_munged(uns, gi->gid[g]));
10044 }
10045 seq_puts(m, "\n\tCapEff:\t");
10046 cap = cred->cap_effective;
10047 CAP_FOR_EACH_U32(__capi)
10048 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
10049 seq_putc(m, '\n');
10050 return 0;
10051}
10052
10053static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
10054{
Joseph Qidbbe9c62020-09-29 09:01:22 -060010055 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -060010056 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -070010057 int i;
10058
Jens Axboefad8e0d2020-09-28 08:57:48 -060010059 /*
10060 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
10061	 * since the fdinfo case grabs it in the opposite direction of normal use
10062 * cases. If we fail to get the lock, we just don't iterate any
10063 * structures that could be going away outside the io_uring mutex.
10064 */
10065 has_lock = mutex_trylock(&ctx->uring_lock);
10066
Jens Axboe5f3f26f2021-02-25 10:17:46 -070010067 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -060010068 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -070010069 if (!sq->thread)
10070 sq = NULL;
10071 }
Joseph Qidbbe9c62020-09-29 09:01:22 -060010072
10073 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
10074 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -070010075 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -060010076 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -070010077 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -070010078
Jens Axboe87ce9552020-01-30 08:25:34 -070010079 if (f)
10080 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
10081 else
10082 seq_printf(m, "%5u: <none>\n", i);
10083 }
10084 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -060010085 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +010010086 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +010010087 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -070010088
Pavel Begunkov4751f532021-04-01 15:43:55 +010010089 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -070010090 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010091 if (has_lock && !xa_empty(&ctx->personalities)) {
10092 unsigned long index;
10093 const struct cred *cred;
10094
Jens Axboe87ce9552020-01-30 08:25:34 -070010095 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010096 xa_for_each(&ctx->personalities, index, cred)
10097 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -070010098 }
Jens Axboed7718a92020-02-14 22:23:12 -070010099 seq_printf(m, "PollList:\n");
Jens Axboe79ebeae2021-08-10 15:18:27 -060010100 spin_lock(&ctx->completion_lock);
Jens Axboed7718a92020-02-14 22:23:12 -070010101 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
10102 struct hlist_head *list = &ctx->cancel_hash[i];
10103 struct io_kiocb *req;
10104
10105 hlist_for_each_entry(req, list, hash_node)
10106 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
10107 req->task->task_works != NULL);
10108 }
Jens Axboe79ebeae2021-08-10 15:18:27 -060010109 spin_unlock(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -060010110 if (has_lock)
10111 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -070010112}
10113
10114static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
10115{
10116 struct io_ring_ctx *ctx = f->private_data;
10117
10118 if (percpu_ref_tryget(&ctx->refs)) {
10119 __io_uring_show_fdinfo(ctx, m);
10120 percpu_ref_put(&ctx->refs);
10121 }
10122}
Tobias Klauserbebdb652020-02-26 18:38:32 +010010123#endif
Jens Axboe87ce9552020-01-30 08:25:34 -070010124
Jens Axboe2b188cc2019-01-07 10:46:33 -070010125static const struct file_operations io_uring_fops = {
10126 .release = io_uring_release,
10127 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010128#ifndef CONFIG_MMU
10129 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
10130 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
10131#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -070010132 .poll = io_uring_poll,
Tobias Klauserbebdb652020-02-26 18:38:32 +010010133#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -070010134 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +010010135#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -070010136};
10137
10138static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
10139 struct io_uring_params *p)
10140{
Hristo Venev75b28af2019-08-26 17:23:46 +000010141 struct io_rings *rings;
10142 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010143
Jens Axboebd740482020-08-05 12:58:23 -060010144	/* make sure these are sane, as we already accounted for them */
10145 ctx->sq_entries = p->sq_entries;
10146 ctx->cq_entries = p->cq_entries;
10147
Hristo Venev75b28af2019-08-26 17:23:46 +000010148 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
10149 if (size == SIZE_MAX)
10150 return -EOVERFLOW;
10151
10152 rings = io_mem_alloc(size);
10153 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010154 return -ENOMEM;
10155
Hristo Venev75b28af2019-08-26 17:23:46 +000010156 ctx->rings = rings;
10157 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
10158 rings->sq_ring_mask = p->sq_entries - 1;
10159 rings->cq_ring_mask = p->cq_entries - 1;
10160 rings->sq_ring_entries = p->sq_entries;
10161 rings->cq_ring_entries = p->cq_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010162
10163 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -070010164 if (size == SIZE_MAX) {
10165 io_mem_free(ctx->rings);
10166 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010167 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -070010168 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010169
10170 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -070010171 if (!ctx->sq_sqes) {
10172 io_mem_free(ctx->rings);
10173 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010174 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -070010175 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010176
Jens Axboe2b188cc2019-01-07 10:46:33 -070010177 return 0;
10178}
10179
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010180static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
10181{
10182 int ret, fd;
10183
10184 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
10185 if (fd < 0)
10186 return fd;
10187
Pavel Begunkoveef51da2021-06-14 02:36:15 +010010188 ret = io_uring_add_tctx_node(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010189 if (ret) {
10190 put_unused_fd(fd);
10191 return ret;
10192 }
10193 fd_install(fd, file);
10194 return fd;
10195}
10196
Jens Axboe2b188cc2019-01-07 10:46:33 -070010197/*
10198 * Allocate an anonymous fd; this is what constitutes the application-
10199 * visible backing of an io_uring instance. The application mmaps this
10200 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
10201 * we have to tie this fd to a socket for file garbage collection purposes.
10202 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010203static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010204{
10205 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010206#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010207 int ret;
10208
Jens Axboe2b188cc2019-01-07 10:46:33 -070010209 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
10210 &ctx->ring_sock);
10211 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010212 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010213#endif
10214
Jens Axboe2b188cc2019-01-07 10:46:33 -070010215 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
10216 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010217#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010218 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010219 sock_release(ctx->ring_sock);
10220 ctx->ring_sock = NULL;
10221 } else {
10222 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010223 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010224#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010225 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010226}
10227
Xiaoguang Wang7f136572020-05-05 16:28:53 +080010228static int io_uring_create(unsigned entries, struct io_uring_params *p,
10229 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010230{
Jens Axboe2b188cc2019-01-07 10:46:33 -070010231 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010232 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010233 int ret;
10234
Jens Axboe8110c1a2019-12-28 15:39:54 -070010235 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010236 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -070010237 if (entries > IORING_MAX_ENTRIES) {
10238 if (!(p->flags & IORING_SETUP_CLAMP))
10239 return -EINVAL;
10240 entries = IORING_MAX_ENTRIES;
10241 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010242
10243 /*
10244 * Use twice as many entries for the CQ ring. It's possible for the
10245 * application to drive a higher depth than the size of the SQ ring,
10246 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -060010247 * some flexibility in overcommitting a bit. If the application has
10248 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
10249 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -070010250 */
10251 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -060010252 if (p->flags & IORING_SETUP_CQSIZE) {
10253 /*
10254 * If IORING_SETUP_CQSIZE is set, we do the same roundup
10255 * to a power-of-two, if it isn't already. We do NOT impose
10256 * any cq vs sq ring sizing.
10257 */
Joseph Qieb2667b32020-11-24 15:03:03 +080010258 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -060010259 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -070010260 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
10261 if (!(p->flags & IORING_SETUP_CLAMP))
10262 return -EINVAL;
10263 p->cq_entries = IORING_MAX_CQ_ENTRIES;
10264 }
Joseph Qieb2667b32020-11-24 15:03:03 +080010265 p->cq_entries = roundup_pow_of_two(p->cq_entries);
10266 if (p->cq_entries < p->sq_entries)
10267 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -060010268 } else {
10269 p->cq_entries = 2 * p->sq_entries;
10270 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010271
Jens Axboe2b188cc2019-01-07 10:46:33 -070010272 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -070010273 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010274 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010275 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -070010276 if (!capable(CAP_IPC_LOCK))
10277 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -060010278
10279 /*
10280 * This is just grabbed for accounting purposes. When a process exits,
10281 * the mm is exited and dropped before the files, hence we need to hang
10282 * on to this mm purely for the purposes of being able to unaccount
10283 * memory (locked/pinned vm). It's not used for anything else.
10284 */
Jens Axboe6b7898e2020-08-25 07:58:00 -060010285 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -060010286 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -060010287
Jens Axboe2b188cc2019-01-07 10:46:33 -070010288 ret = io_allocate_scq_urings(ctx, p);
10289 if (ret)
10290 goto err;
10291
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010292 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010293 if (ret)
10294 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +010010295 /* always set a rsrc node */
Pavel Begunkov47b228c2021-04-29 11:46:48 +010010296 ret = io_rsrc_node_switch_start(ctx);
10297 if (ret)
10298 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +010010299 io_rsrc_node_switch(ctx, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010300
Jens Axboe2b188cc2019-01-07 10:46:33 -070010301 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +000010302 p->sq_off.head = offsetof(struct io_rings, sq.head);
10303 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
10304 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
10305 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
10306 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
10307 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
10308 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010309
10310 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +000010311 p->cq_off.head = offsetof(struct io_rings, cq.head);
10312 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
10313 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
10314 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
10315 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
10316 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +020010317 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -060010318
Xiaoguang Wang7f136572020-05-05 16:28:53 +080010319 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
10320 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +080010321 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +080010322 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Pavel Begunkov96905572021-06-10 16:37:38 +010010323 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
10324 IORING_FEAT_RSRC_TAGS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +080010325
10326 if (copy_to_user(params, p, sizeof(*p))) {
10327 ret = -EFAULT;
10328 goto err;
10329 }
Jens Axboed1719f72020-07-30 13:43:53 -060010330
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010331 file = io_uring_get_file(ctx);
10332 if (IS_ERR(file)) {
10333 ret = PTR_ERR(file);
10334 goto err;
10335 }
10336
Jens Axboed1719f72020-07-30 13:43:53 -060010337 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -060010338 * Install ring fd as the very last thing, so we don't risk someone
10339 * having closed it before we finish setup
10340 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010341 ret = io_uring_install_fd(ctx, file);
10342 if (ret < 0) {
10343 /* fput will clean it up */
10344 fput(file);
10345 return ret;
10346 }
Jens Axboe044c1ab2019-10-28 09:15:33 -060010347
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010348 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010349 return ret;
10350err:
10351 io_ring_ctx_wait_and_kill(ctx);
10352 return ret;
10353}
10354
10355/*
10356 * Sets up an io_uring context and returns the fd. The application asks for a
10357 * ring size; we return the actual sq/cq ring sizes (among other things) in the
10358 * params structure passed in.
10359 */
10360static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
10361{
10362 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010363 int i;
10364
10365 if (copy_from_user(&p, params, sizeof(p)))
10366 return -EFAULT;
10367 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
10368 if (p.resv[i])
10369 return -EINVAL;
10370 }
10371
Jens Axboe6c271ce2019-01-10 11:22:30 -070010372 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -070010373 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010374 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
10375 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -070010376 return -EINVAL;
10377
Xiaoguang Wang7f136572020-05-05 16:28:53 +080010378 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010379}
10380
10381SYSCALL_DEFINE2(io_uring_setup, u32, entries,
10382 struct io_uring_params __user *, params)
10383{
10384 return io_uring_setup(entries, params);
10385}
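
/*
 * Minimal userspace usage of the syscall above, as a sketch with error
 * handling elided; IORING_SETUP_CQSIZE exercises the CQ overcommit knob
 * described in io_uring_create():
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_CQSIZE,
 *		.cq_entries	= 4096,
 *	};
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 * On return, p.sq_entries/p.cq_entries hold the rounded-up ring sizes
 * and p.features the IORING_FEAT_* bits this kernel supports.
 */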
10386
Jens Axboe66f4af92020-01-16 15:36:52 -070010387static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
10388{
10389 struct io_uring_probe *p;
10390 size_t size;
10391 int i, ret;
10392
10393 size = struct_size(p, ops, nr_args);
10394 if (size == SIZE_MAX)
10395 return -EOVERFLOW;
10396 p = kzalloc(size, GFP_KERNEL);
10397 if (!p)
10398 return -ENOMEM;
10399
10400 ret = -EFAULT;
10401 if (copy_from_user(p, arg, size))
10402 goto out;
10403 ret = -EINVAL;
10404 if (memchr_inv(p, 0, size))
10405 goto out;
10406
10407 p->last_op = IORING_OP_LAST - 1;
10408 if (nr_args > IORING_OP_LAST)
10409 nr_args = IORING_OP_LAST;
10410
10411 for (i = 0; i < nr_args; i++) {
10412 p->ops[i].op = i;
10413 if (!io_op_defs[i].not_supported)
10414 p->ops[i].flags = IO_URING_OP_SUPPORTED;
10415 }
10416 p->ops_len = i;
10417
10418 ret = 0;
10419 if (copy_to_user(arg, p, size))
10420 ret = -EFAULT;
10421out:
10422 kfree(p);
10423 return ret;
10424}
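
/*
 * Hedged sketch of driving the probe above from userspace via
 * io_uring_register(2), e.g. to test whether IORING_OP_OPENAT2 is
 * supported on the running kernel:
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_PROBE, probe, IORING_OP_LAST);
 *	bool supported = !ret &&
 *		probe->last_op >= IORING_OP_OPENAT2 &&
 *		(probe->ops[IORING_OP_OPENAT2].flags &
 *		 IO_URING_OP_SUPPORTED);
 */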
10425
Jens Axboe071698e2020-01-28 10:04:42 -070010426static int io_register_personality(struct io_ring_ctx *ctx)
10427{
Jens Axboe4379bf82021-02-15 13:40:22 -070010428 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010429 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -060010430 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -070010431
Jens Axboe4379bf82021-02-15 13:40:22 -070010432 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -060010433
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010434 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
10435 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
Jens Axboea30f8952021-08-20 14:53:59 -060010436 if (ret < 0) {
10437 put_cred(creds);
10438 return ret;
10439 }
10440 return id;
Jens Axboe071698e2020-01-28 10:04:42 -070010441}
10442
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010443static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
10444 unsigned int nr_args)
10445{
10446 struct io_uring_restriction *res;
10447 size_t size;
10448 int i, ret;
10449
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010450 /* Restrictions allowed only if rings started disabled */
10451 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10452 return -EBADFD;
10453
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010454 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010455 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010456 return -EBUSY;
10457
10458 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
10459 return -EINVAL;
10460
10461 size = array_size(nr_args, sizeof(*res));
10462 if (size == SIZE_MAX)
10463 return -EOVERFLOW;
10464
10465 res = memdup_user(arg, size);
10466 if (IS_ERR(res))
10467 return PTR_ERR(res);
10468
10469 ret = 0;
10470
10471 for (i = 0; i < nr_args; i++) {
10472 switch (res[i].opcode) {
10473 case IORING_RESTRICTION_REGISTER_OP:
10474 if (res[i].register_op >= IORING_REGISTER_LAST) {
10475 ret = -EINVAL;
10476 goto out;
10477 }
10478
10479 __set_bit(res[i].register_op,
10480 ctx->restrictions.register_op);
10481 break;
10482 case IORING_RESTRICTION_SQE_OP:
10483 if (res[i].sqe_op >= IORING_OP_LAST) {
10484 ret = -EINVAL;
10485 goto out;
10486 }
10487
10488 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
10489 break;
10490 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
10491 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
10492 break;
10493 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
10494 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
10495 break;
10496 default:
10497 ret = -EINVAL;
10498 goto out;
10499 }
10500 }
10501
10502out:
10503 /* Reset all restrictions if an error happened */
10504 if (ret != 0)
10505 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
10506 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010507 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010508
10509 kfree(res);
10510 return ret;
10511}
10512
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010513static int io_register_enable_rings(struct io_ring_ctx *ctx)
10514{
10515 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
10516 return -EBADFD;
10517
10518 if (ctx->restrictions.registered)
10519 ctx->restricted = 1;
10520
Pavel Begunkov0298ef92021-03-08 13:20:57 +000010521 ctx->flags &= ~IORING_SETUP_R_DISABLED;
10522 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
10523 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010524 return 0;
10525}
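
/*
 * A sketch of the intended userspace sequence for the two handlers
 * above: create the ring with IORING_SETUP_R_DISABLED, pin down what it
 * may do, then enable it. The restriction set here is illustrative.
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_REGISTER_OP,
 *		  .register_op = IORING_REGISTER_BUFFERS },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */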
10526
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

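/*
 * Handler for the original IORING_REGISTER_FILES_UPDATE opcode. Userspace
 * passes the legacy, smaller struct io_uring_rsrc_update, so only that
 * many bytes are copied in; the rest of the v2 struct stays zeroed.
 */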
static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

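/*
 * Handler for IORING_REGISTER_FILES_UPDATE2 and
 * IORING_REGISTER_BUFFERS_UPDATE, which take the full
 * struct io_uring_rsrc_update2 and reject any set reserved fields.
 */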
static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

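/*
 * Handler for IORING_REGISTER_FILES2 and IORING_REGISTER_BUFFERS2, the
 * tagged registration interface: rr.data points at the array of
 * resources and rr.tags at a matching array of u64 tags, both rr.nr
 * entries long.
 */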
static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extensible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv || rr.resv2)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

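/*
 * Restrict the calling task's io-wq workers to the CPU set described by
 * the user-supplied bitmap. Compat tasks hand in a bitmap made of 32-bit
 * words, so it is decoded with compat_get_bitmap() rather than copied raw.
 */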
static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
				unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}

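/*
 * Clear any registered io-wq affinity; passing a NULL mask tells io-wq
 * to fall back to the default worker affinity.
 */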
static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}

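/*
 * Cap the number of bounded and unbounded io-wq workers for this ring
 * (new_count[0] and new_count[1]; a zero entry leaves that limit
 * untouched). The previous limits are copied back out to userspace. For
 * SQPOLL rings the cap is applied to the SQPOLL task's io-wq; otherwise
 * it is applied to the caller's io-wq and then propagated to every task
 * attached to the ring.
 */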
static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. It's fine to drop uring_lock here, since
			 * we hold a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	ret = -EINVAL;
	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL; only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}

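/*
 * Most register opcodes manage their own synchronization (or don't touch
 * ring state at all) and can run with the ring live. The remaining
 * opcodes must first quiesce the ring: kill the ctx refs and wait for
 * all in-flight references to drain before proceeding.
 */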
static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_REGISTER_BUFFERS:
	case IORING_UNREGISTER_BUFFERS:
	case IORING_REGISTER_FILES:
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
	case IORING_REGISTER_FILES2:
	case IORING_REGISTER_FILES_UPDATE2:
	case IORING_REGISTER_BUFFERS2:
	case IORING_REGISTER_BUFFERS_UPDATE:
	case IORING_REGISTER_IOWQ_AFF:
	case IORING_UNREGISTER_IOWQ_AFF:
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		return false;
	default:
		return true;
	}
}

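/*
 * Quiesce the ctx: kill the percpu ref and wait (interruptibly, running
 * task_work as needed) until all outstanding references have dropped.
 * On failure the refs are resurrected so the ring remains usable.
 */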
static int io_ctx_quiesce(struct io_ring_ctx *ctx)
{
	long ret;

	percpu_ref_kill(&ctx->refs);

	/*
	 * Drop uring mutex before waiting for references to exit. If another
	 * thread is currently inside io_uring_enter() it might need to grab
	 * the uring_lock to make progress. If we hold it here across the
	 * drain wait, then we can deadlock. It's safe to drop the mutex here,
	 * since no new references will come in after we've killed the percpu
	 * ref.
	 */
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = wait_for_completion_interruptible(&ctx->ref_comp);
		if (!ret)
			break;
		ret = io_run_task_work_sig();
	} while (ret >= 0);
	mutex_lock(&ctx->uring_lock);

	if (ret)
		io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
	return ret;
}

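/*
 * Core of io_uring_register(2): validate the opcode against any active
 * restrictions, quiesce the ring if the opcode requires it, dispatch to
 * the per-opcode handler, and finally bring the ctx refs back to life
 * for the quiesced cases. Called with ctx->uring_lock held, though the
 * lock may be dropped and retaken along the way.
 */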
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST)
			return -EINVAL;
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	if (io_register_op_must_quiesce(opcode)) {
		ret = io_ctx_quiesce(ctx);
		if (ret)
			return ret;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

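/*
 * Entry point for the io_uring_register(2) syscall. As a minimal,
 * illustrative userspace sketch (the "ring_fd" and "fds" values here are
 * hypothetical), a fixed file set could be registered on an existing
 * ring with:
 *
 *	int fds[2] = { open("a", O_RDONLY), open("b", O_RDONLY) };
 *
 *	if (syscall(__NR_io_uring_register, ring_fd,
 *		    IORING_REGISTER_FILES, fds, 2) < 0)
 *		perror("io_uring_register");
 *
 * after which SQEs can refer to the files by array index by setting
 * IOSQE_FIXED_FILE.
 */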
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

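/*
 * Module init: compile-time layout checks that pin down the UAPI ABI
 * (SQE field offsets and sizes, struct size relationships), followed by
 * creation of the io_kiocb slab cache.
 */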
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
}
__initcall(io_uring_init);