// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
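/*
 * For illustration only (not part of this file): a minimal userspace
 * sketch of the CQ reaping protocol described above. The names cq_head,
 * cq_tail, cq_mask, cqes and handle_cqe() are assumed placeholders for
 * the mmap'ed ring fields and an application callback:
 *
 *	unsigned head = *cq_head;
 *
 *	// the acquire pairs with the kernel's release store of the CQ tail
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	// the release orders the CQE loads before the new head is published
 *	smp_store_release(cq_head, head);
 *
 * The submission side is the mirror image: fill in the SQE and the
 * sq_array slot first, then smp_store_release() the new SQ tail; with
 * IORING_SETUP_SQPOLL, issue a full smp_mb() before testing
 * IORING_SQ_NEED_WAKEUP.
 */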
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to the cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to the cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

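/*
 * A hedged sketch of how userspace typically turns the published offsets
 * into pointers into this structure, mirroring what liburing does ('p' is
 * the struct io_uring_params filled in by io_uring_setup(), 'fd' is the
 * returned ring fd; error handling omitted):
 *
 *	size_t sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	unsigned char *sq_ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				     MAP_SHARED | MAP_POPULATE, fd,
 *				     IORING_OFF_SQ_RING);
 *	unsigned *sq_head  = (unsigned *)(sq_ptr + p.sq_off.head);
 *	unsigned *sq_tail  = (unsigned *)(sq_ptr + p.sq_off.tail);
 *	unsigned *sq_mask  = (unsigned *)(sq_ptr + p.sq_off.ring_mask);
 *	unsigned *sq_array = (unsigned *)(sq_ptr + p.sq_off.array);
 */
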
enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
	unsigned int		compl_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;

	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		spinlock_t		timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction	restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket		*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash		*hash_map;

		/* Only used for accounting purposes */
		struct user_struct		*user;
		struct mm_struct		*mm_account;

		/* ctx exit and cancelation */
		struct llist_head		fallback_llist;
		struct delayed_work		fallback_work;
		struct work_struct		exit_work;
		struct list_head		tctx_list;
		struct completion		ref_comp;
		u32				iowq_limits[2];
		bool				iowq_limits_set;
	};
};

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	struct callback_head	task_work;
	bool			task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
	u32				flags;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	size_t				done_io;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	int				splice_fd_in;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_mkdir {
	struct file			*file;
	int				dfd;
	umode_t				mode;
	struct filename			*filename;
};

struct io_symlink {
	struct file			*file;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
};

struct io_hardlink {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_completion {
	struct file			*file;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	struct iov_iter_state		iter_state;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_PARTIAL_IO_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

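/*
 * To make the layout above concrete: the low byte of req->flags mirrors
 * the user-visible SQE flags bit-for-bit, while everything from bit 8 up
 * is kernel-internal state. So, for example, a request submitted with
 * IOSQE_ASYNC that later gets marked failed would carry
 * (REQ_F_FORCE_ASYNC | REQ_F_FAIL) in the same field.
 */
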
enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		struct io_mkdir		mkdir;
		struct io_symlink	symlink;
		struct io_hardlink	hardlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
	/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
	struct io_buffer		*kbuf;
	atomic_t			poll_refs;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
	[IORING_OP_MKDIRAT] = {},
	[IORING_OP_SYMLINKAT] = {},
	[IORING_OP_LINKAT] = {},
};

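/*
 * For reference, the table above is consulted on the submission path
 * roughly as in the sketch below (an illustrative condensation, not a
 * verbatim excerpt of io_init_req() and friends):
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file && !req->file)
 *		return -EBADF;
 *	if (def->needs_async_setup)
 *		ret = io_req_prep_async(req);
 */
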
/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);

static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);

static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static int io_req_prep_async(struct io_kiocb *req);

static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
				 unsigned int issue_flags, u32 slot_index);
static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
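/*
 * With 32-bit unsigned arithmetic, "x + 127u <= 127u" wraps and holds
 * only for x == 0 or x >= UINT_MAX - 126, i.e. a refcount that already
 * dropped to zero (use-after-free) or is about to overflow.
 */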

static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
{
	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
	return atomic_inc_not_zero(&req->refs);
}

static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_REFCOUNT)))
		return true;

	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->refs);
}

static inline void req_ref_get(struct io_kiocb *req)
{
	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	atomic_inc(&req->refs);
}

static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
{
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags |= REQ_F_REFCOUNT;
		atomic_set(&req->refs, nr);
	}
}

static inline void io_req_set_refcount(struct io_kiocb *req)
{
	__io_req_set_refcount(req, 1);
}

static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	/* already at zero, wait for ->release() */
	if (!got)
		wait_for_completion(compl);
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			       bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
1256 }
1257 return matched;
1258}
1259
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001260static inline void req_set_fail(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001261{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001262 req->flags |= REQ_F_FAIL;
Jens Axboec40f6372020-06-25 15:39:59 -06001263}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001264
Hao Xua8295b92021-08-27 17:46:09 +08001265static inline void req_fail_link_node(struct io_kiocb *req, int res)
1266{
1267 req_set_fail(req);
1268 req->result = res;
1269}
1270
Jens Axboe2b188cc2019-01-07 10:46:33 -07001271static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1272{
1273 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1274
Jens Axboe0f158b42020-05-14 17:18:39 -06001275 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001276}
1277
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001278static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1279{
1280 return !req->timeout.off;
1281}
1282
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001283static void io_fallback_req_func(struct work_struct *work)
1284{
1285 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1286 fallback_work.work);
1287 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1288 struct io_kiocb *req, *tmp;
Pavel Begunkovf237c302021-08-18 12:42:46 +01001289 bool locked = false;
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001290
1291 percpu_ref_get(&ctx->refs);
1292 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
Pavel Begunkovf237c302021-08-18 12:42:46 +01001293 req->io_task_work.func(req, &locked);
Pavel Begunkov5636c002021-08-18 12:42:45 +01001294
Pavel Begunkovf237c302021-08-18 12:42:46 +01001295 if (locked) {
1296 if (ctx->submit_state.compl_nr)
1297 io_submit_flush_completions(ctx);
1298 mutex_unlock(&ctx->uring_lock);
1299 }
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001300 percpu_ref_put(&ctx->refs);
Pavel Begunkovf237c302021-08-18 12:42:46 +01001301
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001302}
1303
Jens Axboe2b188cc2019-01-07 10:46:33 -07001304static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1305{
1306 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001307 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001308
1309 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1310 if (!ctx)
1311 return NULL;
1312
Jens Axboe78076bb2019-12-04 19:56:40 -07001313 /*
1314	 * Use 5 bits less than the max cq entries; that should give us around
1315 * 32 entries per hash list if totally full and uniformly spread.
1316 */
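	/* e.g. cq_entries == 4096: ilog2() == 12, hash_bits == 7 -> 128 lists */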
1317 hash_bits = ilog2(p->cq_entries);
1318 hash_bits -= 5;
1319 if (hash_bits <= 0)
1320 hash_bits = 1;
1321 ctx->cancel_hash_bits = hash_bits;
1322 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1323 GFP_KERNEL);
1324 if (!ctx->cancel_hash)
1325 goto err;
1326 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1327
Pavel Begunkov62248432021-04-28 13:11:29 +01001328 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1329 if (!ctx->dummy_ubuf)
1330 goto err;
1331	/* set an invalid range, so io_import_fixed() fails when it meets it */
1332 ctx->dummy_ubuf->ubuf = -1UL;
1333
Roman Gushchin21482892019-05-07 10:01:48 -07001334 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001335 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1336 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001337
1338 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001339 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001340 INIT_LIST_HEAD(&ctx->sqd_list);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001341 init_waitqueue_head(&ctx->poll_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001342 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001343 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001344 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001345 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001346 mutex_init(&ctx->uring_lock);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001347 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001348 spin_lock_init(&ctx->completion_lock);
Jens Axboe89850fc2021-08-10 15:11:51 -06001349 spin_lock_init(&ctx->timeout_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001350 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001351 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001352 INIT_LIST_HEAD(&ctx->timeout_list);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06001353 INIT_LIST_HEAD(&ctx->ltimeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001354 spin_lock_init(&ctx->rsrc_ref_lock);
1355 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001356 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1357 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001358 INIT_LIST_HEAD(&ctx->tctx_list);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001359 INIT_LIST_HEAD(&ctx->submit_state.free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001360 INIT_LIST_HEAD(&ctx->locked_free_list);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01001361 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001362 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001363err:
Pavel Begunkov62248432021-04-28 13:11:29 +01001364 kfree(ctx->dummy_ubuf);
Jens Axboe78076bb2019-12-04 19:56:40 -07001365 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001366 kfree(ctx);
1367 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001368}
1369
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001370static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1371{
1372 struct io_rings *r = ctx->rings;
1373
1374 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1375 ctx->cq_extra--;
1376}
1377
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001378static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001379{
Jens Axboe2bc99302020-07-09 09:43:27 -06001380 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1381 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001382
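		/*
		 * A drained request may only proceed once a CQE has been posted
		 * for everything submitted before it: seq is its submission
		 * sequence, and cq_extra accounts for CQEs that have no matching
		 * SQE (it is decremented for overflowed CQEs).
		 */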
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001383 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001384 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001385
Bob Liu9d858b22019-11-13 18:06:25 +08001386 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001387}
1388
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01001389#define FFS_ASYNC_READ 0x1UL
1390#define FFS_ASYNC_WRITE 0x2UL
1391#ifdef CONFIG_64BIT
1392#define FFS_ISREG 0x4UL
1393#else
1394#define FFS_ISREG 0x0UL
1395#endif
1396#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
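/*
 * These bits are stashed in the low bits of the file pointer kept in the
 * fixed file table, and FFS_MASK recovers the pointer itself. 32-bit kernels
 * only guarantee two alignment bits, hence FFS_ISREG is 64-bit only.
 */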
1397
1398static inline bool io_req_ffs_set(struct io_kiocb *req)
1399{
1400 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1401}
1402
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001403static void io_req_track_inflight(struct io_kiocb *req)
1404{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001405 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001406 req->flags |= REQ_F_INFLIGHT;
Jens Axboe3746d622022-06-23 11:06:43 -06001407 atomic_inc(&req->task->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001408 }
1409}
1410
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001411static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1412{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01001413 if (WARN_ON_ONCE(!req->link))
1414 return NULL;
1415
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001416 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1417 req->flags |= REQ_F_LINK_TIMEOUT;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001418
1419 /* linked timeouts should have two refs once prep'ed */
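	/* one ref for the timer/disarm side, one for the timeout req's own completion */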
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001420 io_req_set_refcount(req);
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001421 __io_req_set_refcount(req->link, 2);
1422 return req->link;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001423}
1424
1425static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1426{
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001427 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001428 return NULL;
1429 return __io_prep_linked_timeout(req);
1430}
1431
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001432static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001433{
Jens Axboed3656342019-12-18 09:50:26 -07001434 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001435 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001436
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001437 if (!(req->flags & REQ_F_CREDS)) {
1438 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001439 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001440 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001441
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001442 req->work.list.next = NULL;
1443 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001444 if (req->flags & REQ_F_FORCE_ASYNC)
1445 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1446
Jens Axboed3656342019-12-18 09:50:26 -07001447 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001448 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001449 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001450 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001451 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001452 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001453 }
Jens Axboe561fb042019-10-24 07:25:42 -06001454}
1455
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001456static void io_prep_async_link(struct io_kiocb *req)
1457{
1458 struct io_kiocb *cur;
1459
Pavel Begunkov44eff402021-07-26 14:14:31 +01001460 if (req->flags & REQ_F_LINK_TIMEOUT) {
1461 struct io_ring_ctx *ctx = req->ctx;
1462
Pavel Begunkov09eb40f2021-11-23 01:45:35 +00001463 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001464 io_for_each_link(cur, req)
1465 io_prep_async_work(cur);
Pavel Begunkov09eb40f2021-11-23 01:45:35 +00001466 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001467 } else {
1468 io_for_each_link(cur, req)
1469 io_prep_async_work(cur);
1470 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001471}
1472
Pavel Begunkovf237c302021-08-18 12:42:46 +01001473static void io_queue_async_work(struct io_kiocb *req, bool *locked)
Jens Axboe561fb042019-10-24 07:25:42 -06001474{
Jackie Liua197f662019-11-08 08:09:12 -07001475 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001476 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001477 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001478
Pavel Begunkovf237c302021-08-18 12:42:46 +01001479 /* must not take the lock, NULL it as a precaution */
1480 locked = NULL;
1481
Jens Axboe3bfe6102021-02-16 14:15:30 -07001482 BUG_ON(!tctx);
1483 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001484
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001485 /* init ->work of the whole link before punting */
1486 io_prep_async_link(req);
Jens Axboe991468d2021-07-23 11:53:54 -06001487
1488 /*
1489 * Not expected to happen, but if we do have a bug where this _can_
1490 * happen, catch it here and ensure the request is marked as
1491 * canceled. That will make io-wq go through the usual work cancel
1492 * procedure rather than attempt to run this request (or create a new
1493 * worker for it).
1494 */
1495 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1496 req->work.flags |= IO_WQ_WORK_CANCEL;
1497
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001498 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1499 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001500 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001501 if (link)
1502 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001503}
1504
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001505static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001506 __must_hold(&req->ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06001507 __must_hold(&req->ctx->timeout_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001508{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001509 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001510
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001511 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov2ae2eb92021-09-09 13:56:27 +01001512 if (status)
1513 req_set_fail(req);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001514 atomic_set(&req->ctx->cq_timeouts,
1515 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001516 list_del_init(&req->timeout.list);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001517 io_fill_cqe_req(req, status, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001518 io_put_req_deferred(req);
Jens Axboe5262f562019-09-17 12:26:57 -06001519 }
1520}
1521
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001522static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001523{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001524 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001525 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1526 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001527
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001528 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001529 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001530 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001531 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001532 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001533 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001534}
1535
Pavel Begunkov360428f2020-05-30 14:54:17 +03001536static void io_flush_timeouts(struct io_ring_ctx *ctx)
Jens Axboe89850fc2021-08-10 15:11:51 -06001537 __must_hold(&ctx->completion_lock)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001538{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001539 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Jens Axboeba7261a2022-04-08 11:08:58 -06001540 struct io_kiocb *req, *tmp;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001541
Jens Axboe79ebeae2021-08-10 15:18:27 -06001542 spin_lock_irq(&ctx->timeout_lock);
Jens Axboeba7261a2022-04-08 11:08:58 -06001543 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001544 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001545
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001546 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001547 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001548
1549 /*
1550 * Since seq can easily wrap around over time, subtract
1551 * the last seq at which timeouts were flushed before comparing.
1552 * Assuming not more than 2^31-1 events have happened since,
1553 * these subtractions won't have wrapped, so we can check if
1554 * target is in [last_seq, current_seq] by comparing the two.
1555 */
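		/*
		 * e.g. with u32 wrap: last flush at 0xfffffff0, target seq 0x10,
		 * current seq 0x20: events_needed == 0x20, events_got == 0x30,
		 * so the timeout is due even though target < last flush numerically.
		 */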
1556 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1557 events_got = seq - ctx->cq_last_tm_flush;
1558 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001559 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001560
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001561 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001562 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001563 ctx->cq_last_tm_flush = seq;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001564 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001565}
1566
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001567static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001568{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001569 if (ctx->off_timeout_used)
1570 io_flush_timeouts(ctx);
1571 if (ctx->drain_active)
1572 io_queue_deferred(ctx);
1573}
1574
1575static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1576{
1577 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1578 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001579 /* order cqe stores with ring update */
1580 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001581}
1582
Jens Axboe90554202020-09-03 12:12:41 -06001583static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1584{
1585 struct io_rings *r = ctx->rings;
1586
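	/*
	 * tail and head are free-running counters; unsigned subtraction
	 * yields the queued SQE count correctly even after they wrap.
	 */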
Pavel Begunkova566c552021-05-16 22:58:08 +01001587 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001588}
1589
Pavel Begunkov888aae22021-01-19 13:32:39 +00001590static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1591{
1592 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1593}
1594
Pavel Begunkovd068b502021-05-16 22:58:11 +01001595static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001596{
Hristo Venev75b28af2019-08-26 17:23:46 +00001597 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001598 unsigned tail, mask = ctx->cq_entries - 1;
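	/* ring sizes are powers of two, so "& mask" is a cheap "% cq_entries" */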
Jens Axboe2b188cc2019-01-07 10:46:33 -07001599
Stefan Bühler115e12e2019-04-24 23:54:18 +02001600 /*
1601 * writes to the cq entry need to come after reading head; the
1602 * control dependency is enough as we're using WRITE_ONCE to
1603 * fill the cq entry
1604 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001605 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001606 return NULL;
1607
Pavel Begunkov888aae22021-01-19 13:32:39 +00001608 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001609 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001610}
1611
Jens Axboef2842ab2020-01-08 11:04:00 -07001612static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1613{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001614 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001615 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001616 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1617 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001618 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001619}
1620
Jens Axboe2c5d7632021-08-21 07:21:19 -06001621/*
1622 * This should only get called when at least one event has been posted.
1623 * Some applications rely on the eventfd notification count only changing
1624 * IFF a new CQE has been added to the CQ ring. There's no dependency
1625 * on a 1:1 relationship between how many times this function is called
1626 * (and hence the eventfd count) and the number of CQEs posted to the CQ ring.
1627 */
Jens Axboeb41e9852020-02-17 09:52:41 -07001628static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001629{
Jens Axboe5fd46172021-08-06 14:04:31 -06001630 /*
1631 * wake_up_all() may seem excessive, but io_wake_function() and
1632 * io_should_wake() handle the termination of the loop and only
1633 * wake as many waiters as we need to.
1634 */
1635 if (wq_has_sleeper(&ctx->cq_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001636 __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
1637 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Jens Axboe534ca6d2020-09-02 13:52:19 -06001638 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1639 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001640 if (io_should_trigger_evfd(ctx))
Jens Axboeccf06b52022-12-23 07:04:49 -07001641 eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
Pavel Begunkov3f008382021-10-01 10:39:33 +01001642 if (waitqueue_active(&ctx->poll_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001643 __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
1644 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Jens Axboe8c838782019-03-12 15:48:16 -06001645}
1646
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001647static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1648{
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001649 /* see waitqueue_active() comment */
1650 smp_mb();
1651
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001652 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001653 if (waitqueue_active(&ctx->cq_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001654 __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
1655 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001656 }
1657 if (io_should_trigger_evfd(ctx))
Jens Axboeccf06b52022-12-23 07:04:49 -07001658 eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
Pavel Begunkov3f008382021-10-01 10:39:33 +01001659 if (waitqueue_active(&ctx->poll_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001660 __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
1661 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001662}
1663
Jens Axboec4a2ed72019-11-21 21:01:26 -07001664/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001665static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001666{
Jens Axboeb18032b2021-01-24 16:58:56 -07001667 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001668
Pavel Begunkova566c552021-05-16 22:58:08 +01001669 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001670 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001671
Jens Axboeb18032b2021-01-24 16:58:56 -07001672 posted = false;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001673 spin_lock(&ctx->completion_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001674 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001675 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001676 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001677
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001678 if (!cqe && !force)
1679 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001680 ocqe = list_first_entry(&ctx->cq_overflow_list,
1681 struct io_overflow_cqe, list);
1682 if (cqe)
1683 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1684 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001685 io_account_cq_overflow(ctx);
1686
Jens Axboeb18032b2021-01-24 16:58:56 -07001687 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001688 list_del(&ocqe->list);
1689 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001690 }
1691
Pavel Begunkov09e88402020-12-17 00:24:38 +00001692 all_flushed = list_empty(&ctx->cq_overflow_list);
1693 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001694 clear_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001695 WRITE_ONCE(ctx->rings->sq_flags,
1696 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001697 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001698
Jens Axboeb18032b2021-01-24 16:58:56 -07001699 if (posted)
1700 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001701 spin_unlock(&ctx->completion_lock);
Jens Axboeb18032b2021-01-24 16:58:56 -07001702 if (posted)
1703 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001704 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001705}
1706
Pavel Begunkov90f67362021-08-09 20:18:12 +01001707static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001708{
Jens Axboeca0a2652021-03-04 17:15:48 -07001709 bool ret = true;
1710
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001711 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001712 /* iopoll syncs against uring_lock, not completion_lock */
1713 if (ctx->flags & IORING_SETUP_IOPOLL)
1714 mutex_lock(&ctx->uring_lock);
Pavel Begunkov90f67362021-08-09 20:18:12 +01001715 ret = __io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001716 if (ctx->flags & IORING_SETUP_IOPOLL)
1717 mutex_unlock(&ctx->uring_lock);
1718 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001719
1720 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001721}
1722
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001723/* must be called fairly soon after putting a request */
1724static inline void io_put_task(struct task_struct *task, int nr)
1725{
1726 struct io_uring_task *tctx = task->io_uring;
1727
Pavel Begunkove98e49b2021-08-18 17:01:43 +01001728 if (likely(task == current)) {
1729 tctx->cached_refs += nr;
1730 } else {
1731 percpu_counter_sub(&tctx->inflight, nr);
1732 if (unlikely(atomic_read(&tctx->in_idle)))
1733 wake_up(&tctx->wait);
1734 put_task_struct_many(task, nr);
1735 }
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001736}
1737
Pavel Begunkov9a108672021-08-27 11:55:01 +01001738static void io_task_refs_refill(struct io_uring_task *tctx)
1739{
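	/*
	 * Called when cached_refs has gone negative: top it back up to the
	 * batch size, e.g. cached_refs == -3 takes 3 + IO_TCTX_REFS_CACHE_NR
	 * task refs at once and leaves cached_refs == IO_TCTX_REFS_CACHE_NR.
	 */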
1740 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
1741
1742 percpu_counter_add(&tctx->inflight, refill);
1743 refcount_add(refill, &current->usage);
1744 tctx->cached_refs += refill;
1745}
1746
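/*
 * Each inflight request holds a reference to the submitting task; take the
 * references in batches so the per-request fast path is a plain decrement.
 */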
1747static inline void io_get_task_refs(int nr)
1748{
1749 struct io_uring_task *tctx = current->io_uring;
1750
1751 tctx->cached_refs -= nr;
1752 if (unlikely(tctx->cached_refs < 0))
1753 io_task_refs_refill(tctx);
1754}
1755
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00001756static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
1757{
1758 struct io_uring_task *tctx = task->io_uring;
1759 unsigned int refs = tctx->cached_refs;
1760
1761 if (refs) {
1762 tctx->cached_refs = 0;
1763 percpu_counter_sub(&tctx->inflight, refs);
1764 put_task_struct_many(task, refs);
1765 }
1766}
1767
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001768static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001769 s32 res, u32 cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001770{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001771 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001772
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001773 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1774 if (!ocqe) {
1775 /*
1776 * If we're in ring overflow flush mode, or in task cancel mode,
1777 * or cannot allocate an overflow entry, then we need to drop it
1778 * on the floor.
1779 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001780 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001781 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001782 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001783 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001784 set_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001785 WRITE_ONCE(ctx->rings->sq_flags,
1786 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1787
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001788 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001789 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001790 ocqe->cqe.res = res;
1791 ocqe->cqe.flags = cflags;
1792 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1793 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001794}
1795
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001796static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
1797 s32 res, u32 cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001798{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001799 struct io_uring_cqe *cqe;
1800
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001801 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001802
1803 /*
1804 * If we can't get a cq entry, userspace overflowed the
1805 * submission (by quite a lot). Increment the overflow count in
1806 * the ring.
1807 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001808 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001809 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001810 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001811 WRITE_ONCE(cqe->res, res);
1812 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001813 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001814 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001815 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001816}
1817
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001818static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001819{
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001820 __io_fill_cqe(req->ctx, req->user_data, res, cflags);
1821}
1822
1823static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
1824 s32 res, u32 cflags)
1825{
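	/*
	 * aux CQEs (e.g. extra multishot poll completions) have no matching
	 * SQE; bump cq_extra so drain accounting in req_need_defer() stays
	 * balanced.
	 */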
1826 ctx->cq_extra++;
1827 return __io_fill_cqe(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001828}
1829
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001830static void io_req_complete_post(struct io_kiocb *req, s32 res,
1831 u32 cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001832{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001833 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001834
Jens Axboe79ebeae2021-08-10 15:18:27 -06001835 spin_lock(&ctx->completion_lock);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001836 __io_fill_cqe(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001837 /*
1838 * If we're the last reference to this request, add to our locked
1839 * free_list cache.
1840 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001841 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001842 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov0756a862021-08-15 10:40:25 +01001843 if (req->flags & IO_DISARM_MASK)
Pavel Begunkov7a612352021-03-09 00:37:59 +00001844 io_disarm_next(req);
1845 if (req->link) {
1846 io_req_task_queue(req->link);
1847 req->link = NULL;
1848 }
1849 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001850 io_dismantle_req(req);
1851 io_put_task(req->task, 1);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001852 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001853 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001854 } else {
1855 if (!percpu_ref_tryget(&ctx->refs))
1856 req = NULL;
1857 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001858 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001859 spin_unlock(&ctx->completion_lock);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001860
Pavel Begunkov180f8292021-03-14 20:57:09 +00001861 if (req) {
1862 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001863 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001864 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001865}
1866
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001867static inline bool io_req_needs_clean(struct io_kiocb *req)
1868{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001869 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001870}
1871
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001872static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
1873 u32 cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001874{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001875 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001876 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001877 req->result = res;
1878 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001879 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001880}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001881
Pavel Begunkov889fca72021-02-10 00:03:09 +00001882static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001883 s32 res, u32 cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001884{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001885 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1886 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001887 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001888 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001889}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001890
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001891static inline void io_req_complete(struct io_kiocb *req, s32 res)
Jens Axboee1e16092020-06-22 09:17:17 -06001892{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001893 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001894}
1895
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001896static void io_req_complete_failed(struct io_kiocb *req, s32 res)
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001897{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001898 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001899 io_req_complete_post(req, res, 0);
1900}
1901
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01001902static void io_req_complete_fail_submit(struct io_kiocb *req)
1903{
1904 /*
1905	 * We're not going to submit; fail the whole chain. To do that, turn
1906	 * hardlinks into normal links; an extra REQ_F_LINK is tolerated.
1907 */
1908 req->flags &= ~REQ_F_HARDLINK;
1909 req->flags |= REQ_F_LINK;
1910 io_req_complete_failed(req, req->result);
1911}
1912
Pavel Begunkov864ea922021-08-09 13:04:08 +01001913/*
1914 * Don't initialise the fields below on every allocation, but do that in
1915 * advance and keep them valid across allocations.
1916 */
1917static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1918{
1919 req->ctx = ctx;
1920 req->link = NULL;
1921 req->async_data = NULL;
1922 /* not necessary, but safer to zero */
1923 req->result = 0;
1924}
1925
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001926static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001927 struct io_submit_state *state)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001928{
Jens Axboe79ebeae2021-08-10 15:18:27 -06001929 spin_lock(&ctx->completion_lock);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001930 list_splice_init(&ctx->locked_free_list, &state->free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001931 ctx->locked_free_nr = 0;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001932 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001933}
1934
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001935/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001936static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001937{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001938 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001939 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001940
Jens Axboec7dae4b2021-02-09 19:53:37 -07001941 /*
1942 * If we have more than a batch's worth of requests in our IRQ side
1943 * locked cache, grab the lock and move them over to our submission
1944 * side cache.
1945 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001946 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001947 io_flush_cached_locked_reqs(ctx, state);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001948
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001949 nr = state->free_reqs;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001950 while (!list_empty(&state->free_list)) {
1951 struct io_kiocb *req = list_first_entry(&state->free_list,
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001952 struct io_kiocb, inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001953
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001954 list_del(&req->inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001955 state->reqs[nr++] = req;
1956 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001957 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001958 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001959
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001960 state->free_reqs = nr;
1961 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001962}
1963
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001964/*
1965 * A request might get retired back into the request caches even before opcode
1966 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1967 * Because of that, io_alloc_req() should be called only under ->uring_lock
1968 * and with extra caution not to get a request that is still being worked on.
1969 */
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001970static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001971 __must_hold(&ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001972{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001973 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkov864ea922021-08-09 13:04:08 +01001974 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1975 int ret, i;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001976
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001977 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001978
Pavel Begunkov864ea922021-08-09 13:04:08 +01001979 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1980 goto got_req;
Jens Axboe2579f912019-01-09 09:10:43 -07001981
Pavel Begunkov864ea922021-08-09 13:04:08 +01001982 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1983 state->reqs);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001984
Pavel Begunkov864ea922021-08-09 13:04:08 +01001985 /*
1986 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1987 * retry single alloc to be on the safe side.
1988 */
1989 if (unlikely(ret <= 0)) {
1990 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1991 if (!state->reqs[0])
1992 return NULL;
1993 ret = 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001994 }
Pavel Begunkov864ea922021-08-09 13:04:08 +01001995
1996 for (i = 0; i < ret; i++)
1997 io_preinit_req(state->reqs[i], ctx);
1998 state->free_reqs = ret;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001999got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03002000 state->free_reqs--;
2001 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07002002}
2003
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002004static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002005{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002006 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002007 fput(file);
2008}
2009
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002010static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002011{
Pavel Begunkov094bae42021-03-19 17:22:42 +00002012 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03002013
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01002014 if (io_req_needs_clean(req))
2015 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002016 if (!(flags & REQ_F_FIXED_FILE))
2017 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00002018 if (req->fixed_rsrc_refs)
2019 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01002020 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00002021 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01002022 req->async_data = NULL;
2023 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03002024}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03002025
Pavel Begunkov216578e2020-10-13 09:44:00 +01002026static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03002027{
Jens Axboe51a4cc12020-08-10 10:55:56 -06002028 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002029
Pavel Begunkov216578e2020-10-13 09:44:00 +01002030 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00002031 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03002032
Jens Axboe79ebeae2021-08-10 15:18:27 -06002033 spin_lock(&ctx->completion_lock);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01002034 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01002035 ctx->locked_free_nr++;
Jens Axboe79ebeae2021-08-10 15:18:27 -06002036 spin_unlock(&ctx->completion_lock);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01002037
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002038 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06002039}
2040
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002041static inline void io_remove_next_linked(struct io_kiocb *req)
2042{
2043 struct io_kiocb *nxt = req->link;
2044
2045 req->link = nxt->link;
2046 nxt->link = NULL;
2047}
2048
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002049static bool io_kill_linked_timeout(struct io_kiocb *req)
2050 __must_hold(&req->ctx->completion_lock)
Jens Axboe89b263f2021-08-10 15:14:18 -06002051 __must_hold(&req->ctx->timeout_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002052{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002053 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002054
Pavel Begunkovb97e7362021-08-15 10:40:23 +01002055 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002056 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002057
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002058 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00002059 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01002060 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovef9dd632021-08-28 19:54:38 -06002061 list_del(&link->timeout.list);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002062 io_fill_cqe_req(link, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002063 io_put_req_deferred(link);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002064 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002065 }
2066 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002067 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002068}
2069
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002070static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002071 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002072{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002073 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06002074
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002075 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002076 while (link) {
Hao Xua8295b92021-08-27 17:46:09 +08002077 long res = -ECANCELED;
2078
2079 if (link->flags & REQ_F_FAIL)
2080 res = link->result;
2081
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002082 nxt = link->link;
2083 link->link = NULL;
2084
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002085 trace_io_uring_fail_link(req, link);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002086 io_fill_cqe_req(link, res, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002087 io_put_req_deferred(link);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002088 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06002089 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002090}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002091
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002092static bool io_disarm_next(struct io_kiocb *req)
2093 __must_hold(&req->ctx->completion_lock)
2094{
2095 bool posted = false;
2096
Pavel Begunkov0756a862021-08-15 10:40:25 +01002097 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2098 struct io_kiocb *link = req->link;
2099
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01002100 req->flags &= ~REQ_F_ARM_LTIMEOUT;
Pavel Begunkov0756a862021-08-15 10:40:25 +01002101 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2102 io_remove_next_linked(req);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002103 io_fill_cqe_req(link, -ECANCELED, 0);
Pavel Begunkov0756a862021-08-15 10:40:25 +01002104 io_put_req_deferred(link);
2105 posted = true;
2106 }
2107 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
Jens Axboe89b263f2021-08-10 15:14:18 -06002108 struct io_ring_ctx *ctx = req->ctx;
2109
2110 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002111 posted = io_kill_linked_timeout(req);
Jens Axboe89b263f2021-08-10 15:14:18 -06002112 spin_unlock_irq(&ctx->timeout_lock);
2113 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002114 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01002115 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002116 posted |= (req->link != NULL);
2117 io_fail_links(req);
2118 }
2119 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06002120}
2121
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002122static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002123{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002124 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07002125
Jens Axboe9e645e112019-05-10 16:07:28 -06002126 /*
2127 * If LINK is set, we have dependent requests in this chain. If we
2128 * didn't fail this request, queue the first one up, moving any other
2129 * dependencies to the next request. In case of failure, fail the rest
2130 * of the chain.
2131 */
Pavel Begunkov0756a862021-08-15 10:40:25 +01002132 if (req->flags & IO_DISARM_MASK) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002133 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002134 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002135
Jens Axboe79ebeae2021-08-10 15:18:27 -06002136 spin_lock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002137 posted = io_disarm_next(req);
2138 if (posted)
2139 io_commit_cqring(req->ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002140 spin_unlock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002141 if (posted)
2142 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002143 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002144 nxt = req->link;
2145 req->link = NULL;
2146 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002147}
Jens Axboe2665abf2019-11-05 12:40:47 -07002148
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002149static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002150{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00002151 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002152 return NULL;
2153 return __io_req_find_next(req);
2154}
2155
Pavel Begunkovf237c302021-08-18 12:42:46 +01002156static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
Pavel Begunkov2c323952021-02-28 22:04:53 +00002157{
2158 if (!ctx)
2159 return;
Pavel Begunkovf237c302021-08-18 12:42:46 +01002160 if (*locked) {
Hao Xu99c8bc52021-08-21 06:19:54 +08002161 if (ctx->submit_state.compl_nr)
2162 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00002163 mutex_unlock(&ctx->uring_lock);
Pavel Begunkovf237c302021-08-18 12:42:46 +01002164 *locked = false;
Pavel Begunkov2c323952021-02-28 22:04:53 +00002165 }
2166 percpu_ref_put(&ctx->refs);
2167}
2168
Jens Axboe7cbf1722021-02-10 00:03:20 +00002169static void tctx_task_work(struct callback_head *cb)
2170{
Pavel Begunkovf237c302021-08-18 12:42:46 +01002171 bool locked = false;
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002172 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002173 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2174 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002175
Pavel Begunkov16f72072021-06-17 18:14:09 +01002176 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002177 struct io_wq_work_node *node;
2178
Pavel Begunkov8d4ad412021-09-02 00:38:23 +01002179 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
2180 io_submit_flush_completions(ctx);
2181
Pavel Begunkov3f184072021-06-17 18:14:06 +01002182 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01002183 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002184 INIT_WQ_LIST(&tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002185 if (!node)
2186 tctx->task_running = false;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002187 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002188 if (!node)
2189 break;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002190
Pavel Begunkov6294f362021-08-10 17:53:55 +01002191 do {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002192 struct io_wq_work_node *next = node->next;
2193 struct io_kiocb *req = container_of(node, struct io_kiocb,
2194 io_task_work.node);
2195
2196 if (req->ctx != ctx) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002197 ctx_flush_and_put(ctx, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002198 ctx = req->ctx;
Pavel Begunkov126180b2021-08-18 12:42:47 +01002199 /* if not contended, grab and improve batching */
2200 locked = mutex_trylock(&ctx->uring_lock);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002201 percpu_ref_get(&ctx->refs);
2202 }
Pavel Begunkovf237c302021-08-18 12:42:46 +01002203 req->io_task_work.func(req, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002204 node = next;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002205 } while (node);
2206
Jens Axboe7cbf1722021-02-10 00:03:20 +00002207 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01002208 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002209
Pavel Begunkovf237c302021-08-18 12:42:46 +01002210 ctx_flush_and_put(ctx, &locked);
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00002211
2212 /* relaxed read is enough as only the task itself sets ->in_idle */
2213 if (unlikely(atomic_read(&tctx->in_idle)))
2214 io_uring_drop_tctx_refs(current);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002215}
2216
Pavel Begunkove09ee512021-07-01 13:26:05 +01002217static void io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00002218{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002219 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002220 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002221 enum task_work_notify_mode notify;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002222 struct io_wq_work_node *node;
Jens Axboe0b81e802021-02-16 10:33:53 -07002223 unsigned long flags;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002224 bool running;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002225
2226 WARN_ON_ONCE(!tctx);
2227
Jens Axboe0b81e802021-02-16 10:33:53 -07002228 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002229 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002230 running = tctx->task_running;
2231 if (!running)
2232 tctx->task_running = true;
Jens Axboe0b81e802021-02-16 10:33:53 -07002233 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002234
2235 /* task_work already pending, we're done */
Pavel Begunkov6294f362021-08-10 17:53:55 +01002236 if (running)
Pavel Begunkove09ee512021-07-01 13:26:05 +01002237 return;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002238
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002239 /*
2240 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2241 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2242 * processing task_work. There's no reliable way to tell if TWA_RESUME
2243 * will do the job.
2244 */
2245 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002246 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2247 wake_up_process(tsk);
Pavel Begunkove09ee512021-07-01 13:26:05 +01002248 return;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002249 }
Pavel Begunkov2215bed2021-08-09 13:04:06 +01002250
Pavel Begunkove09ee512021-07-01 13:26:05 +01002251 spin_lock_irqsave(&tctx->task_lock, flags);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002252 tctx->task_running = false;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002253 node = tctx->task_list.first;
2254 INIT_WQ_LIST(&tctx->task_list);
2255 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002256
Pavel Begunkove09ee512021-07-01 13:26:05 +01002257 while (node) {
2258 req = container_of(node, struct io_kiocb, io_task_work.node);
2259 node = node->next;
2260 if (llist_add(&req->io_task_work.fallback_node,
2261 &req->ctx->fallback_llist))
2262 schedule_delayed_work(&req->ctx->fallback_work, 1);
2263 }
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002264}
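/*
 * Typical use, as in io_req_task_queue() below: point ->io_task_work.func
 * at the callback and queue the request to the issuing task (names here
 * match the code above):
 *
 *	req->io_task_work.func = io_req_task_submit;
 *	io_req_task_work_add(req);
 *
 * If task_work_add() fails because the task is exiting, everything queued
 * so far is drained and re-routed to the per-ctx fallback workqueue above,
 * so no request is ever silently dropped.
 */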
2265
Pavel Begunkovf237c302021-08-18 12:42:46 +01002266static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
Jens Axboec40f6372020-06-25 15:39:59 -06002267{
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002268 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002269
Pavel Begunkovb18a1a42021-08-25 20:51:39 +01002270 /* not needed for normal modes, but SQPOLL depends on it */
Pavel Begunkovf237c302021-08-18 12:42:46 +01002271 io_tw_lock(ctx, locked);
Pavel Begunkov25935532021-03-19 17:22:40 +00002272 io_req_complete_failed(req, req->result);
Jens Axboec40f6372020-06-25 15:39:59 -06002273}
2274
Pavel Begunkovf237c302021-08-18 12:42:46 +01002275static void io_req_task_submit(struct io_kiocb *req, bool *locked)
Jens Axboec40f6372020-06-25 15:39:59 -06002276{
2277 struct io_ring_ctx *ctx = req->ctx;
2278
Pavel Begunkovf237c302021-08-18 12:42:46 +01002279 io_tw_lock(ctx, locked);
Jens Axboe316319e2021-08-19 09:41:42 -06002280 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkovaf066f32021-08-09 13:04:19 +01002281 if (likely(!(req->task->flags & PF_EXITING)))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002282 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002283 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002284 io_req_complete_failed(req, -EFAULT);
Jens Axboe9e645e112019-05-10 16:07:28 -06002285}
2286
Pavel Begunkova3df76982021-02-18 22:32:52 +00002287static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2288{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002289 req->result = ret;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002290 req->io_task_work.func = io_req_task_cancel;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002291 io_req_task_work_add(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00002292}
2293
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002294static void io_req_task_queue(struct io_kiocb *req)
2295{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002296 req->io_task_work.func = io_req_task_submit;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002297 io_req_task_work_add(req);
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002298}
2299
Jens Axboe773af692021-07-27 10:25:55 -06002300static void io_req_task_queue_reissue(struct io_kiocb *req)
2301{
2302 req->io_task_work.func = io_queue_async_work;
2303 io_req_task_work_add(req);
2304}
2305
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002306static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002307{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002308 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002309
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002310 if (nxt)
2311 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002312}
2313
Jens Axboe9e645e112019-05-10 16:07:28 -06002314static void io_free_req(struct io_kiocb *req)
2315{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002316 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002317 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002318}
2319
Pavel Begunkovf237c302021-08-18 12:42:46 +01002320static void io_free_req_work(struct io_kiocb *req, bool *locked)
2321{
2322 io_free_req(req);
2323}
2324
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002325struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002326 struct task_struct *task;
2327 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002328 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002329};
2330
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002331static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002332{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002333 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002334 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002335 rb->task = NULL;
2336}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002337
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002338static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2339 struct req_batch *rb)
2340{
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002341 if (rb->ctx_refs)
2342 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkove98e49b2021-08-18 17:01:43 +01002343 if (rb->task)
Pavel Begunkove9dbe222021-08-09 13:04:20 +01002344 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002345}
2346
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002347static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2348 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002349{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002350 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002351 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002352
Jens Axboee3bc8e92020-09-24 08:45:57 -06002353 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002354 if (rb->task)
2355 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002356 rb->task = req->task;
2357 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002358 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002359 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002360 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002361
Pavel Begunkovbd759042021-02-12 03:23:50 +00002362 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002363 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002364 else
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002365 list_add(&req->inflight_entry, &state->free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002366}
2367
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002368static void io_submit_flush_completions(struct io_ring_ctx *ctx)
Jens Axboea141dd82021-08-12 12:48:34 -06002369 __must_hold(&ctx->uring_lock)
Pavel Begunkov905c1722021-02-10 00:03:14 +00002370{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002371 struct io_submit_state *state = &ctx->submit_state;
2372 int i, nr = state->compl_nr;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002373 struct req_batch rb;
2374
Jens Axboe79ebeae2021-08-10 15:18:27 -06002375 spin_lock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002376 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002377 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002378
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002379 __io_fill_cqe(ctx, req->user_data, req->result,
2380 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002381 }
2382 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002383 spin_unlock(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002384 io_cqring_ev_posted(ctx);
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002385
2386 io_init_req_batch(&rb);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002387 for (i = 0; i < nr; i++) {
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002388 struct io_kiocb *req = state->compl_reqs[i];
Pavel Begunkov905c1722021-02-10 00:03:14 +00002389
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002390 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002391 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002392 }
2393
2394 io_req_free_batch_finish(ctx, &rb);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01002395 state->compl_nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002396}
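/*
 * Note the two-pass structure above: the first pass posts all CQEs under a
 * single completion_lock acquisition, and only after the lock is dropped
 * and waiters are woken does the second pass drop request references and
 * recycle them in bulk via the req_batch helpers.
 */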
2397
Jens Axboeba816ad2019-09-28 11:36:45 -06002398/*
2399 * Drop reference to request, return next in chain (if there is one) if this
2400 * was the last reference to this request.
2401 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002402static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002403{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002404 struct io_kiocb *nxt = NULL;
2405
Jens Axboede9b4cc2021-02-24 13:28:27 -07002406 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002407 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002408 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002409 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002410 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002411}
2412
Pavel Begunkov0d850352021-03-19 17:22:37 +00002413static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002414{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002415 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002416 io_free_req(req);
2417}
2418
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002419static inline void io_put_req_deferred(struct io_kiocb *req)
Pavel Begunkov216578e2020-10-13 09:44:00 +01002420{
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002421 if (req_ref_put_and_test(req)) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002422 req->io_task_work.func = io_free_req_work;
Pavel Begunkov543af3a2021-08-09 13:04:15 +01002423 io_req_task_work_add(req);
2424 }
Pavel Begunkov216578e2020-10-13 09:44:00 +01002425}
2426
Pavel Begunkov6c503152021-01-04 20:36:36 +00002427static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002428{
2429 /* See comment at the top of this file */
2430 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002431 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002432}
2433
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002434static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2435{
2436 struct io_rings *rings = ctx->rings;
2437
2438 /* make sure SQ entry isn't read before tail */
2439 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2440}
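/*
 * The acquire load above pairs with the application's release store of
 * sq.tail once new SQEs are written. A minimal userspace sketch, assuming
 * a mapped SQ ring and hypothetical variable names:
 *
 *	unsigned tail = *sq_ktail;
 *	sq_array[tail & *sq_kring_mask] = sqe_index;
 *	__atomic_store_n(sq_ktail, tail + 1, __ATOMIC_RELEASE);
 */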
2441
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002442static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002443{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002444 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002445
Jens Axboebcda7ba2020-02-23 16:42:51 -07002446 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2447 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002448 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002449 kfree(kbuf);
2450 return cflags;
2451}
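/*
 * Userspace recovers the buffer ID from the CQE flags that io_put_kbuf()
 * encodes above; a sketch of the consumer side:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		// 'bid' is the provided buffer this completion consumed
 *	}
 */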
2452
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002453static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2454{
2455 struct io_buffer *kbuf;
2456
Pavel Begunkovae421d92021-08-17 20:28:08 +01002457 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
2458 return 0;
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002459 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2460 return io_put_kbuf(req, kbuf);
2461}
2462
Jens Axboe4c6e2772020-07-01 11:29:10 -06002463static inline bool io_run_task_work(void)
2464{
Nadav Amitef98eb02021-08-07 17:13:41 -07002465 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
Jens Axboe4c6e2772020-07-01 11:29:10 -06002466 __set_current_state(TASK_RUNNING);
Nadav Amitef98eb02021-08-07 17:13:41 -07002467 tracehook_notify_signal();
Jens Axboe4c6e2772020-07-01 11:29:10 -06002468 return true;
2469 }
2470
2471 return false;
2472}
2473
Jens Axboedef596e2019-01-09 08:59:42 -07002474/*
2475 * Find and free completed poll iocbs
2476 */
2477static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002478 struct list_head *done)
Jens Axboedef596e2019-01-09 08:59:42 -07002479{
Jens Axboe8237e042019-12-28 10:48:22 -07002480 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002481 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002482
2483 /* order with ->result store in io_complete_rw_iopoll() */
2484 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002485
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002486 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002487 while (!list_empty(done)) {
Pavel Begunkoved4629d2023-01-14 09:14:03 -07002488 struct io_uring_cqe *cqe;
2489 unsigned cflags;
2490
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002491 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002492 list_del(&req->inflight_entry);
Pavel Begunkoved4629d2023-01-14 09:14:03 -07002493 cflags = io_put_rw_kbuf(req);
Jens Axboedef596e2019-01-09 08:59:42 -07002494 (*nr_events)++;
2495
Pavel Begunkoved4629d2023-01-14 09:14:03 -07002496 cqe = io_get_cqe(ctx);
2497 if (cqe) {
2498 WRITE_ONCE(cqe->user_data, req->user_data);
2499 WRITE_ONCE(cqe->res, req->result);
2500 WRITE_ONCE(cqe->flags, cflags);
2501 } else {
2502 spin_lock(&ctx->completion_lock);
2503 io_cqring_event_overflow(ctx, req->user_data,
2504 req->result, cflags);
2505 spin_unlock(&ctx->completion_lock);
2506 }
2507
Jens Axboede9b4cc2021-02-24 13:28:27 -07002508 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002509 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002510 }
Jens Axboedef596e2019-01-09 08:59:42 -07002511
Jens Axboe09bb8392019-03-13 12:39:28 -06002512 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002513 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002514 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002515}
2516
Jens Axboedef596e2019-01-09 08:59:42 -07002517static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
Pavel Begunkova8576af2021-08-15 10:40:21 +01002518 long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002519{
2520 struct io_kiocb *req, *tmp;
2521 LIST_HEAD(done);
2522 bool spin;
Jens Axboedef596e2019-01-09 08:59:42 -07002523
2524 /*
2525 * Only spin for completions if we don't have multiple devices hanging
2526 * off our complete list, and we're under the requested amount.
2527 */
Hao Xu915b3dd2021-06-28 05:37:30 +08002528 spin = !ctx->poll_multi_queue && *nr_events < min;
Jens Axboedef596e2019-01-09 08:59:42 -07002529
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002530 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002531 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkova2416e12021-08-09 13:04:09 +01002532 int ret;
Jens Axboedef596e2019-01-09 08:59:42 -07002533
2534 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002535 * Move completed and retryable entries to our local lists.
2536 * If we find a request that requires polling, break out
2537 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002538 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002539 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002540 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002541 continue;
2542 }
2543 if (!list_empty(&done))
2544 break;
2545
2546 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
Pavel Begunkova2416e12021-08-09 13:04:09 +01002547 if (unlikely(ret < 0))
2548 return ret;
2549 else if (ret)
2550 spin = false;
Jens Axboedef596e2019-01-09 08:59:42 -07002551
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002552 /* iopoll may have completed current req */
2553 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002554 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002555 }
2556
2557 if (!list_empty(&done))
Pavel Begunkova8576af2021-08-15 10:40:21 +01002558 io_iopoll_complete(ctx, nr_events, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002559
Pavel Begunkova2416e12021-08-09 13:04:09 +01002560 return 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002561}
2562
2563/*
Jens Axboedef596e2019-01-09 08:59:42 -07002564 * We can't just wait for polled events to come to us, we have to actively
2565 * find and complete them.
2566 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002567static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002568{
2569 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2570 return;
2571
2572 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002573 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002574 unsigned int nr_events = 0;
2575
Pavel Begunkova8576af2021-08-15 10:40:21 +01002576 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002577
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002578		/* let it sleep and repeat later if we can't complete a request */
2579 if (nr_events == 0)
2580 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002581 /*
2582		 * Ensure we allow local-to-the-cpu processing to take place;
2583		 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002584		 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002585 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002586 if (need_resched()) {
2587 mutex_unlock(&ctx->uring_lock);
2588 cond_resched();
2589 mutex_lock(&ctx->uring_lock);
2590 }
Jens Axboedef596e2019-01-09 08:59:42 -07002591 }
2592 mutex_unlock(&ctx->uring_lock);
2593}
2594
Pavel Begunkov7668b922020-07-07 16:36:21 +03002595static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002596{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002597 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002598 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002599
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002600 /*
2601 * We disallow the app entering submit/complete with polling, but we
2602 * still need to lock the ring to prevent racing with polled issue
2603 * that got punted to a workqueue.
2604 */
2605 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002606 /*
2607 * Don't enter poll loop if we already have events pending.
2608 * If we do, we can potentially be spinning for commands that
2609 * already triggered a CQE (eg in error).
2610 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01002611 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002612 __io_cqring_overflow_flush(ctx, false);
2613 if (io_cqring_events(ctx))
2614 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002615 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002616 /*
2617 * If a submit got punted to a workqueue, we can have the
2618 * application entering polling for a command before it gets
2619 * issued. That app will hold the uring_lock for the duration
2620 * of the poll right here, so we need to take a breather every
2621 * now and then to ensure that the issue has a chance to add
2622 * the poll to the issued list. Otherwise we can spin here
2623 * forever, while the workqueue is stuck trying to acquire the
2624 * very same mutex.
2625 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002626 if (list_empty(&ctx->iopoll_list)) {
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002627 u32 tail = ctx->cached_cq_tail;
2628
Jens Axboe500f9fb2019-08-19 12:15:59 -06002629 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002630 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002631 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002632
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002633 /* some requests don't go through iopoll_list */
2634 if (tail != ctx->cached_cq_tail ||
2635 list_empty(&ctx->iopoll_list))
Pavel Begunkove9979b32021-04-13 02:58:45 +01002636 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002637 }
Pavel Begunkova8576af2021-08-15 10:40:21 +01002638 ret = io_do_iopoll(ctx, &nr_events, min);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002639 } while (!ret && nr_events < min && !need_resched());
2640out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002641 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002642 return ret;
2643}
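/*
 * This path is driven by userspace on IORING_SETUP_IOPOLL rings via
 * io_uring_enter(IORING_ENTER_GETEVENTS). A rough liburing sketch
 * (illustrative only, error handling omitted; assumes 'fd' was opened
 * with O_DIRECT):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);	// ends up in io_iopoll_check()
 *	io_uring_cqe_seen(&ring, cqe);
 */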
2644
Jens Axboe491381ce2019-10-17 09:20:46 -06002645static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002646{
Jens Axboe491381ce2019-10-17 09:20:46 -06002647 /*
2648 * Tell lockdep we inherited freeze protection from submission
2649 * thread.
2650 */
2651 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002652 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002653
Pavel Begunkov1c986792021-03-22 01:58:31 +00002654 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2655 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002656 }
2657}
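/*
 * This pairs with the write submission side, which for regular files takes
 * freeze protection with sb_start_write() and then hands ownership to the
 * in-flight IO via __sb_writers_release(), since completion may run in a
 * different task than submission.
 */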
2658
Jens Axboeb63534c2020-06-04 11:28:00 -06002659#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002660static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002661{
Pavel Begunkovab454432021-03-22 01:58:33 +00002662 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002663
Pavel Begunkovab454432021-03-22 01:58:33 +00002664 if (!rw)
2665 return !io_req_prep_async(req);
Jens Axboecd658692021-09-10 11:19:14 -06002666 iov_iter_restore(&rw->iter, &rw->iter_state);
Pavel Begunkovab454432021-03-22 01:58:33 +00002667 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002668}
Jens Axboeb63534c2020-06-04 11:28:00 -06002669
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002670static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002671{
Jens Axboe355afae2020-09-02 09:30:31 -06002672 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002673 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002674
Jens Axboe355afae2020-09-02 09:30:31 -06002675 if (!S_ISBLK(mode) && !S_ISREG(mode))
2676 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002677 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2678 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002679 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002680 /*
2681 * If ref is dying, we might be running poll reap from the exit work.
2682 * Don't attempt to reissue from that path, just let it fail with
2683 * -EAGAIN.
2684 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002685 if (percpu_ref_is_dying(&ctx->refs))
2686 return false;
Jens Axboeef046882021-07-27 10:50:31 -06002687 /*
2688	 * Play it safe and assume it's not safe to re-import and reissue if
2689	 * we're not in the original thread group (or not in task context).
2690 */
2691 if (!same_thread_group(req->task, current) || !in_task())
2692 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002693 return true;
2694}
Jens Axboee82ad482021-04-02 19:45:34 -06002695#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002696static bool io_resubmit_prep(struct io_kiocb *req)
2697{
2698 return false;
2699}
Jens Axboee82ad482021-04-02 19:45:34 -06002700static bool io_rw_should_reissue(struct io_kiocb *req)
2701{
2702 return false;
2703}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002704#endif
2705
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002706static bool __io_complete_rw_common(struct io_kiocb *req, long res)
Jens Axboea1d7c392020-06-22 11:09:46 -06002707{
Jens Axboedf1ec532022-03-20 13:08:38 -06002708 if (req->rw.kiocb.ki_flags & IOCB_WRITE) {
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002709 kiocb_end_write(req);
Jens Axboedf1ec532022-03-20 13:08:38 -06002710 fsnotify_modify(req->file);
2711 } else {
2712 fsnotify_access(req->file);
2713 }
Pavel Begunkov9532b992021-03-22 01:58:34 +00002714 if (res != req->result) {
2715 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2716 io_rw_should_reissue(req)) {
2717 req->flags |= REQ_F_REISSUE;
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002718 return true;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002719 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002720 req_set_fail(req);
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002721 req->result = res;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002722 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002723 return false;
2724}
2725
Harshit Mogalapallie326ee02023-01-10 08:46:47 -08002726static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
Pavel Begunkove8574572022-10-16 22:42:56 +01002727{
2728 struct io_async_rw *io = req->async_data;
2729
2730 /* add previously done IO, if any */
2731 if (io && io->bytes_done > 0) {
2732 if (res < 0)
2733 res = io->bytes_done;
2734 else
2735 res += io->bytes_done;
2736 }
2737 return res;
2738}
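/*
 * Worked example: a buffered read asks for 8192 bytes, the first attempt
 * copies 4096 (io->bytes_done == 4096), and the retry fails with -EAGAIN.
 * The fixed-up result is then 4096 rather than an error; a later retry
 * that copies the remaining 4096 bytes reports 8192 in total.
 */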
2739
Pavel Begunkovf237c302021-08-18 12:42:46 +01002740static void io_req_task_complete(struct io_kiocb *req, bool *locked)
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002741{
Pavel Begunkov126180b2021-08-18 12:42:47 +01002742 unsigned int cflags = io_put_rw_kbuf(req);
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01002743 int res = req->result;
Pavel Begunkov126180b2021-08-18 12:42:47 +01002744
2745 if (*locked) {
2746 struct io_ring_ctx *ctx = req->ctx;
2747 struct io_submit_state *state = &ctx->submit_state;
2748
2749 io_req_complete_state(req, res, cflags);
2750 state->compl_reqs[state->compl_nr++] = req;
2751 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
2752 io_submit_flush_completions(ctx);
2753 } else {
2754 io_req_complete_post(req, res, cflags);
2755 }
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002756}
2757
2758static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2759 unsigned int issue_flags)
2760{
2761 if (__io_complete_rw_common(req, res))
2762 return;
Pavel Begunkove8574572022-10-16 22:42:56 +01002763 __io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req));
Jens Axboeba816ad2019-09-28 11:36:45 -06002764}
2765
2766static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2767{
Jens Axboe9adbd452019-12-20 08:45:55 -07002768 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002769
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002770 if (__io_complete_rw_common(req, res))
2771 return;
Pavel Begunkove8574572022-10-16 22:42:56 +01002772 req->result = io_fixup_rw_res(req, res);
Jens Axboe8ef12ef2021-08-10 15:15:25 -06002773 req->io_task_work.func = io_req_task_complete;
2774 io_req_task_work_add(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002775}
2776
Jens Axboedef596e2019-01-09 08:59:42 -07002777static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2778{
Jens Axboe9adbd452019-12-20 08:45:55 -07002779 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002780
Jens Axboe491381ce2019-10-17 09:20:46 -06002781 if (kiocb->ki_flags & IOCB_WRITE)
2782 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002783 if (unlikely(res != req->result)) {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01002784 if (res == -EAGAIN && io_rw_should_reissue(req)) {
2785 req->flags |= REQ_F_REISSUE;
2786 return;
Pavel Begunkov9532b992021-03-22 01:58:34 +00002787 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002788 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002789
2790 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002791 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002792 smp_wmb();
2793 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002794}
2795
2796/*
2797 * After the iocb has been issued, it's safe to be found on the poll list.
2798 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002799 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002800 * accessing the kiocb cookie.
2801 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002802static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002803{
2804 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002805 const bool in_async = io_wq_current_is_worker();
2806
2807 /* workqueue context doesn't hold uring_lock, grab it now */
2808 if (unlikely(in_async))
2809 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002810
2811 /*
2812 * Track whether we have multiple files in our lists. This will impact
2813 * how we do polling eventually, not spinning if we're on potentially
2814 * different devices.
2815 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002816 if (list_empty(&ctx->iopoll_list)) {
Hao Xu915b3dd2021-06-28 05:37:30 +08002817 ctx->poll_multi_queue = false;
2818 } else if (!ctx->poll_multi_queue) {
Jens Axboedef596e2019-01-09 08:59:42 -07002819 struct io_kiocb *list_req;
Hao Xu915b3dd2021-06-28 05:37:30 +08002820 unsigned int queue_num0, queue_num1;
Jens Axboedef596e2019-01-09 08:59:42 -07002821
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002822 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002823 inflight_entry);
Hao Xu915b3dd2021-06-28 05:37:30 +08002824
2825 if (list_req->file != req->file) {
2826 ctx->poll_multi_queue = true;
2827 } else {
2828 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2829 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2830 if (queue_num0 != queue_num1)
2831 ctx->poll_multi_queue = true;
2832 }
Jens Axboedef596e2019-01-09 08:59:42 -07002833 }
2834
2835 /*
2836 * For fast devices, IO may have already completed. If it has, add
2837 * it to the front so we find it first.
2838 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002839 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002840 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002841 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002842 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002843
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002844 if (unlikely(in_async)) {
2845 /*
2846		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2847		 * in sq thread task context or in io worker task context. If
2848		 * the current task context is the sq thread, we don't need to
2849		 * check whether we should wake up the sq thread.
2850 */
2851 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2852 wq_has_sleeper(&ctx->sq_data->wait))
2853 wake_up(&ctx->sq_data->wait);
2854
2855 mutex_unlock(&ctx->uring_lock);
2856 }
Jens Axboedef596e2019-01-09 08:59:42 -07002857}
2858
Jens Axboe4503b762020-06-01 10:00:27 -06002859static bool io_bdev_nowait(struct block_device *bdev)
2860{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002861 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002862}
2863
Jens Axboe2b188cc2019-01-07 10:46:33 -07002864/*
2865 * If we tracked the file through the SCM inflight mechanism, we could support
2866 * any file. For now, just ensure that anything potentially problematic is done
2867 * inline.
2868 */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002869static bool __io_file_supports_nowait(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002870{
2871 umode_t mode = file_inode(file)->i_mode;
2872
Jens Axboe4503b762020-06-01 10:00:27 -06002873 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002874 if (IS_ENABLED(CONFIG_BLOCK) &&
2875 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002876 return true;
2877 return false;
2878 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002879 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002880 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002881 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002882 if (IS_ENABLED(CONFIG_BLOCK) &&
2883 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002884 file->f_op != &io_uring_fops)
2885 return true;
2886 return false;
2887 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002888
Jens Axboec5b85622020-06-09 19:23:05 -06002889 /* any ->read/write should understand O_NONBLOCK */
2890 if (file->f_flags & O_NONBLOCK)
2891 return true;
2892
Jens Axboeaf197f52020-04-28 13:15:06 -06002893 if (!(file->f_mode & FMODE_NOWAIT))
2894 return false;
2895
2896 if (rw == READ)
2897 return file->f_op->read_iter != NULL;
2898
2899 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002900}
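/*
 * In short: block devices need a nowait-capable queue; sockets always
 * qualify; regular files need a nowait-capable backing device and must not
 * be io_uring files; everything else needs O_NONBLOCK, or FMODE_NOWAIT
 * plus the matching ->read_iter()/->write_iter() method.
 */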
2901
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002902static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
Jens Axboe7b29f922021-03-12 08:30:14 -07002903{
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002904 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
Jens Axboe7b29f922021-03-12 08:30:14 -07002905 return true;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002906 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
Jens Axboe7b29f922021-03-12 08:30:14 -07002907 return true;
2908
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002909 return __io_file_supports_nowait(req->file, rw);
Jens Axboe7b29f922021-03-12 08:30:14 -07002910}
2911
Jens Axboe5d329e12021-09-14 11:08:37 -06002912static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2913 int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002914{
Jens Axboedef596e2019-01-09 08:59:42 -07002915 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002916 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002917 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002918 unsigned ioprio;
2919 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002920
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01002921 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002922 req->flags |= REQ_F_ISREG;
2923
Jens Axboe2b188cc2019-01-07 10:46:33 -07002924 kiocb->ki_pos = READ_ONCE(sqe->off);
Jens Axboe20fb0dc2021-12-22 20:26:56 -07002925 if (kiocb->ki_pos == -1) {
2926 if (!(file->f_mode & FMODE_STREAM)) {
2927 req->flags |= REQ_F_CUR_POS;
2928 kiocb->ki_pos = file->f_pos;
2929 } else {
2930 kiocb->ki_pos = 0;
2931 }
Jens Axboeba042912019-12-25 16:33:42 -07002932 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002933 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002934 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2935 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2936 if (unlikely(ret))
2937 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002938
Jens Axboe5d329e12021-09-14 11:08:37 -06002939 /*
2940 * If the file is marked O_NONBLOCK, still allow retry for it if it
2941 * supports async. Otherwise it's impossible to use O_NONBLOCK files
2942	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
2943 */
2944 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
2945 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002946 req->flags |= REQ_F_NOWAIT;
2947
Jens Axboe2b188cc2019-01-07 10:46:33 -07002948 ioprio = READ_ONCE(sqe->ioprio);
2949 if (ioprio) {
2950 ret = ioprio_check_cap(ioprio);
2951 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002952 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002953
2954 kiocb->ki_ioprio = ioprio;
2955 } else
2956 kiocb->ki_ioprio = get_current_ioprio();
2957
Jens Axboedef596e2019-01-09 08:59:42 -07002958 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002959 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2960 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002961 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002962
Jens Axboe394918e2021-03-08 11:40:23 -07002963 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
Jens Axboedef596e2019-01-09 08:59:42 -07002964 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002965 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002966 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002967 if (kiocb->ki_flags & IOCB_HIPRI)
2968 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002969 kiocb->ki_complete = io_complete_rw;
2970 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002971
Pavel Begunkovea512d52022-06-09 08:34:35 +01002972 /* used for fixed read/write too - just read unconditionally */
2973 req->buf_index = READ_ONCE(sqe->buf_index);
2974 req->imu = NULL;
2975
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002976 if (req->opcode == IORING_OP_READ_FIXED ||
2977 req->opcode == IORING_OP_WRITE_FIXED) {
Pavel Begunkovea512d52022-06-09 08:34:35 +01002978 struct io_ring_ctx *ctx = req->ctx;
2979 u16 index;
2980
2981 if (unlikely(req->buf_index >= ctx->nr_user_bufs))
2982 return -EFAULT;
2983 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
2984 req->imu = ctx->user_bufs[index];
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002985 io_req_set_rsrc_node(req);
2986 }
2987
Jens Axboe3529d8c2019-12-19 18:24:38 -07002988 req->rw.addr = READ_ONCE(sqe->addr);
2989 req->rw.len = READ_ONCE(sqe->len);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002990 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002991}
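/*
 * The ->buf_index/->imu lookup above backs IORING_OP_READ_FIXED and
 * IORING_OP_WRITE_FIXED. A liburing-style sketch of registering and using
 * a fixed buffer (illustrative only, error handling omitted):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0);	// buf_index 0
 *	io_uring_submit(&ring);
 */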
2992
2993static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2994{
2995 switch (ret) {
2996 case -EIOCBQUEUED:
2997 break;
2998 case -ERESTARTSYS:
2999 case -ERESTARTNOINTR:
3000 case -ERESTARTNOHAND:
3001 case -ERESTART_RESTARTBLOCK:
3002 /*
3003 * We can't just restart the syscall, since previously
3004 * submitted sqes may already be in progress. Just fail this
3005 * IO with EINTR.
3006 */
3007 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05003008 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003009 default:
3010 kiocb->ki_complete(kiocb, ret, 0);
3011 }
3012}
3013
Jens Axboea1d7c392020-06-22 11:09:46 -06003014static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00003015 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06003016{
Jens Axboeba042912019-12-25 16:33:42 -07003017 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboe227c0c92020-08-13 11:51:40 -06003018
Jens Axboeba042912019-12-25 16:33:42 -07003019 if (req->flags & REQ_F_CUR_POS)
3020 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01003021 if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
Pavel Begunkov889fca72021-02-10 00:03:09 +00003022 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06003023 else
3024 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01003025
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01003026 if (req->flags & REQ_F_REISSUE) {
Pavel Begunkov97284632021-04-08 19:28:03 +01003027 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06003028 if (io_resubmit_prep(req)) {
Jens Axboe773af692021-07-27 10:25:55 -06003029 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00003030 } else {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01003031 unsigned int cflags = io_put_rw_kbuf(req);
3032 struct io_ring_ctx *ctx = req->ctx;
3033
Pavel Begunkove8574572022-10-16 22:42:56 +01003034 ret = io_fixup_rw_res(req, ret);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003035 req_set_fail(req);
Hao Xu14cfbb72021-10-14 22:04:00 +08003036 if (!(issue_flags & IO_URING_F_NONBLOCK)) {
Pavel Begunkovb66ceaf2021-09-15 11:00:05 +01003037 mutex_lock(&ctx->uring_lock);
3038 __io_req_complete(req, issue_flags, ret, cflags);
3039 mutex_unlock(&ctx->uring_lock);
3040 } else {
3041 __io_req_complete(req, issue_flags, ret, cflags);
3042 }
Pavel Begunkov97284632021-04-08 19:28:03 +01003043 }
3044 }
Jens Axboeba816ad2019-09-28 11:36:45 -06003045}
3046
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003047static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
3048 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07003049{
Jens Axboe9adbd452019-12-20 08:45:55 -07003050 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01003051 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07003052 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07003053
Pavel Begunkov75769e32021-04-01 15:43:54 +01003054 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07003055 return -EFAULT;
3056 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01003057 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07003058 return -EFAULT;
3059
3060 /*
3061	 * May not be the start of the buffer; set the size appropriately
3062	 * and advance us to the beginning.
3063 */
3064 offset = buf_addr - imu->ubuf;
3065 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06003066
3067 if (offset) {
3068 /*
3069 * Don't use iov_iter_advance() here, as it's really slow for
3070 * using the latter parts of a big fixed buffer - it iterates
3071 * over each segment manually. We can cheat a bit here, because
3072 * we know that:
3073 *
3074 * 1) it's a BVEC iter, we set it up
3075 * 2) all bvecs are PAGE_SIZE in size, except potentially the
3076 * first and last bvec
3077 *
3078 * So just find our index, and adjust the iterator afterwards.
3079 * If the offset is within the first bvec (or the whole first
3080		 * If the offset is within the first bvec (or the whole first
3081		 * bvec), just use iov_iter_advance(). This makes it easier
3082 * be PAGE_SIZE aligned.
3083 */
3084 const struct bio_vec *bvec = imu->bvec;
3085
3086 if (offset <= bvec->bv_len) {
3087 iov_iter_advance(iter, offset);
3088 } else {
3089 unsigned long seg_skip;
3090
3091 /* skip first vec */
3092 offset -= bvec->bv_len;
3093 seg_skip = 1 + (offset >> PAGE_SHIFT);
3094
3095 iter->bvec = bvec + seg_skip;
3096 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02003097 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003098 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06003099 }
3100 }
3101
Pavel Begunkov847595d2021-02-04 13:52:06 +00003102 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07003103}
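/*
 * Worked example of the skip math above, assuming PAGE_SIZE == 4096 and
 * all bvecs being full pages: for offset == 10000, the first vec is
 * skipped (offset becomes 5904), seg_skip == 1 + (5904 >> 12) == 2, and
 * iov_offset == 5904 & ~PAGE_MASK == 1808; 2 * 4096 + 1808 == 10000.
 */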
3104
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003105static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
3106{
Pavel Begunkovea512d52022-06-09 08:34:35 +01003107 if (WARN_ON_ONCE(!req->imu))
3108 return -EFAULT;
3109 return __io_import_fixed(req, rw, iter, req->imu);
Pavel Begunkoveae071c2021-04-25 14:32:24 +01003110}
3111
Jens Axboebcda7ba2020-02-23 16:42:51 -07003112static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
3113{
3114 if (needs_lock)
3115 mutex_unlock(&ctx->uring_lock);
3116}
3117
3118static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
3119{
3120 /*
3121 * "Normal" inline submissions always hold the uring_lock, since we
3122 * grab it from the system call. Same is true for the SQPOLL offload.
3123 * The only exception is when we've detached the request and issue it
3124	 * from an async worker thread; grab the lock in that case.
3125 */
3126 if (needs_lock)
3127 mutex_lock(&ctx->uring_lock);
3128}
3129
3130static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
3131 int bgid, struct io_buffer *kbuf,
3132 bool needs_lock)
3133{
3134 struct io_buffer *head;
3135
3136 if (req->flags & REQ_F_BUFFER_SELECTED)
3137 return kbuf;
3138
3139 io_ring_submit_lock(req->ctx, needs_lock);
3140
3141 lockdep_assert_held(&req->ctx->uring_lock);
3142
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003143 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07003144 if (head) {
3145 if (!list_empty(&head->list)) {
3146 kbuf = list_last_entry(&head->list, struct io_buffer,
3147 list);
3148 list_del(&kbuf->list);
3149 } else {
3150 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003151 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07003152 }
3153 if (*len > kbuf->len)
3154 *len = kbuf->len;
3155 } else {
3156 kbuf = ERR_PTR(-ENOBUFS);
3157 }
3158
3159 io_ring_submit_unlock(req->ctx, needs_lock);
3160
3161 return kbuf;
3162}
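/*
 * The bgid looked up here refers to a buffer group the application
 * provided earlier. A liburing sketch of providing a group and then
 * selecting from it (illustrative; BGID/NBUFS/BUF_SIZE are hypothetical):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, BUF_SIZE, NBUFS, BGID, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, BUF_SIZE, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 */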
3163
Jens Axboe4d954c22020-02-27 07:31:19 -07003164static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
3165 bool needs_lock)
3166{
3167 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003168 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07003169
3170 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003171 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07003172 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3173 if (IS_ERR(kbuf))
3174 return kbuf;
3175 req->rw.addr = (u64) (unsigned long) kbuf;
3176 req->flags |= REQ_F_BUFFER_SELECTED;
3177 return u64_to_user_ptr(kbuf->addr);
3178}
3179
3180#ifdef CONFIG_COMPAT
3181static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3182 bool needs_lock)
3183{
3184 struct compat_iovec __user *uiov;
3185 compat_ssize_t clen;
3186 void __user *buf;
3187 ssize_t len;
3188
3189 uiov = u64_to_user_ptr(req->rw.addr);
3190 if (!access_ok(uiov, sizeof(*uiov)))
3191 return -EFAULT;
3192 if (__get_user(clen, &uiov->iov_len))
3193 return -EFAULT;
3194 if (clen < 0)
3195 return -EINVAL;
3196
3197 len = clen;
3198 buf = io_rw_buffer_select(req, &len, needs_lock);
3199 if (IS_ERR(buf))
3200 return PTR_ERR(buf);
3201 iov[0].iov_base = buf;
3202 iov[0].iov_len = (compat_size_t) len;
3203 return 0;
3204}
3205#endif
3206
3207static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3208 bool needs_lock)
3209{
3210 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3211 void __user *buf;
3212 ssize_t len;
3213
3214 if (copy_from_user(iov, uiov, sizeof(*uiov)))
3215 return -EFAULT;
3216
3217 len = iov[0].iov_len;
3218 if (len < 0)
3219 return -EINVAL;
3220 buf = io_rw_buffer_select(req, &len, needs_lock);
3221 if (IS_ERR(buf))
3222 return PTR_ERR(buf);
3223 iov[0].iov_base = buf;
3224 iov[0].iov_len = len;
3225 return 0;
3226}
3227
3228static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3229 bool needs_lock)
3230{
Jens Axboedddb3e22020-06-04 11:27:01 -06003231 if (req->flags & REQ_F_BUFFER_SELECTED) {
3232 struct io_buffer *kbuf;
3233
3234 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3235 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3236 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003237 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003238 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00003239 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07003240 return -EINVAL;
3241
3242#ifdef CONFIG_COMPAT
3243 if (req->ctx->compat)
3244 return io_compat_import(req, iov, needs_lock);
3245#endif
3246
3247 return __io_iov_buffer_select(req, iov, needs_lock);
3248}
3249
Pavel Begunkov847595d2021-02-04 13:52:06 +00003250static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3251 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003252{
Jens Axboe9adbd452019-12-20 08:45:55 -07003253 void __user *buf = u64_to_user_ptr(req->rw.addr);
3254 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003255 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003256 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003257
Pavel Begunkov7d009162019-11-25 23:14:40 +03003258 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003259 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003260 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003261 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003262
Jens Axboebcda7ba2020-02-23 16:42:51 -07003263 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003264 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003265 return -EINVAL;
3266
Jens Axboe3a6820f2019-12-22 15:19:35 -07003267 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003268 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003269 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003270 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003271 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003272 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003273 }
3274
Jens Axboe3a6820f2019-12-22 15:19:35 -07003275 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3276 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003277 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003278 }
3279
Jens Axboe4d954c22020-02-27 07:31:19 -07003280 if (req->flags & REQ_F_BUFFER_SELECT) {
3281 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003282 if (!ret)
3283 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003284 *iovec = NULL;
3285 return ret;
3286 }
3287
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003288 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3289 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003290}
3291
Jens Axboe0fef9482020-08-26 10:36:20 -06003292static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3293{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003294 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003295}
3296
Jens Axboe32960612019-09-23 11:05:34 -06003297/*
3298 * For files that don't have ->read_iter() and ->write_iter(), handle them
3299 * by looping over ->read() or ->write() manually.
3300 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003301static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003302{
Jens Axboe4017eb92020-10-22 14:14:12 -06003303 struct kiocb *kiocb = &req->rw.kiocb;
3304 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003305 ssize_t ret = 0;
3306
3307 /*
3308 * Don't support polled IO through this interface, and we can't
3309 * support non-blocking either. For the latter, this just causes
3310 * the kiocb to be handled from an async context.
3311 */
3312 if (kiocb->ki_flags & IOCB_HIPRI)
3313 return -EOPNOTSUPP;
3314 if (kiocb->ki_flags & IOCB_NOWAIT)
3315 return -EAGAIN;
3316
3317 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003318 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003319 ssize_t nr;
3320
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003321 if (!iov_iter_is_bvec(iter)) {
3322 iovec = iov_iter_iovec(iter);
3323 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003324 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3325 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003326 }
3327
Jens Axboe32960612019-09-23 11:05:34 -06003328 if (rw == READ) {
3329 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003330 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003331 } else {
3332 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003333 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003334 }
3335
3336 if (nr < 0) {
3337 if (!ret)
3338 ret = nr;
3339 break;
3340 }
Jens Axboe109dda42022-03-18 11:28:13 -06003341 ret += nr;
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003342 if (!iov_iter_is_bvec(iter)) {
3343 iov_iter_advance(iter, nr);
3344 } else {
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003345 req->rw.addr += nr;
Jens Axboe109dda42022-03-18 11:28:13 -06003346 req->rw.len -= nr;
3347 if (!req->rw.len)
3348 break;
Jens Axboe16c8d2d2021-09-12 06:45:07 -06003349 }
Jens Axboe32960612019-09-23 11:05:34 -06003350 if (nr != iovec.iov_len)
3351 break;
Jens Axboe32960612019-09-23 11:05:34 -06003352 }
3353
3354 return ret;
3355}
3356
Jens Axboeff6165b2020-08-13 09:47:43 -06003357static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3358 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003359{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003360 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003361
Jens Axboeff6165b2020-08-13 09:47:43 -06003362 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003363 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003364 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003365 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003366 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003367 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003368 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003369 unsigned iov_off = 0;
3370
3371 rw->iter.iov = rw->fast_iov;
3372 if (iter->iov != fast_iov) {
3373 iov_off = iter->iov - fast_iov;
3374 rw->iter.iov += iov_off;
3375 }
3376 if (rw->fast_iov != fast_iov)
3377 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003378 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003379 } else {
3380 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003381 }
3382}
3383
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003384static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003385{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003386 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3387 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3388 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003389}
3390
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force)
{
	if (!force && !io_op_defs[req->opcode].needs_async_setup)
		return 0;
	if (!req->async_data) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, fast_iov, iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->iter, &iorw->iter_state);
	}
	return 0;
}

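/*
 * Import the user iovec straight into the request's async data at prep
 * time, saving the iterator state so issue can restore it later.
 */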
static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov = iorw->fast_iov;
	int ret;

	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	iov_iter_save_state(&iorw->iter, &iorw->iter_state);
	return 0;
}

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;
	return io_prep_rw(req, sqe, READ);
}

/*
 * This is our waitqueue callback handler, registered through lock_page_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;
	struct wait_page_queue *wait = &rw->wpq;
	struct kiocb *kiocb = &req->rw.kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

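/*
 * Dispatch the read through ->read_iter when the file provides it, else
 * fall back to looping over the legacy ->read hook.
 */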
static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{
	if (req->file->f_op->read_iter)
		return call_read_iter(req->file, &req->rw.kiocb, iter);
	else if (req->file->f_op->read)
		return loop_rw_iter(READ, req, iter);
	else
		return -EINVAL;
}

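/*
 * Regular files and block devices are expected to return the full amount
 * requested, so a short read there is retried. Other file types (pipes,
 * sockets, character devices) may legitimately return short reads.
 */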
static bool need_read_all(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

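/*
 * Issue a read. The first pass tries a non-blocking read where possible;
 * a short read on a file that must return the full amount is retried via
 * the saved iterator state, either with the page-unlock waitqueue armed
 * (IOCB_WAITQ) or by punting to io-wq with -EAGAIN.
 */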
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct iov_iter_state __state, *state;
	ssize_t ret, ret2;

	if (rw) {
		iter = &rw->iter;
		state = &rw->iter_state;
		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(iter, state);
		iovec = NULL;
	} else {
		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
		state = &__state;
		iov_iter_save_state(iter, state);
	}
	req->result = iov_iter_count(iter);

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_nowait(req, READ)) {
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
		return ret ?: -EAGAIN;
	}

	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(req, iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		goto out_free;
	} else if (ret <= 0 || ret == req->result || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(iter, state);

	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	rw = req->async_data;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */
	if (iter != &rw->iter) {
		iter = &rw->iter;
		state = &rw->iter_state;
	}

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(iter, ret);
		if (!iov_iter_count(iter))
			break;
		rw->bytes_done += ret;
		iov_iter_save_state(iter, state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->result = iov_iter_count(iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(req, iter);
		if (ret == -EIOCBQUEUED)
			return 0;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(iter, state);
	} while (ret > 0);
done:
	kiocb_done(kiocb, ret, issue_flags);
out_free:
	/* it's faster to check for NULL here than to delegate to kfree() */
	if (iovec)
		kfree(iovec);
	return 0;
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;
	return io_prep_rw(req, sqe, WRITE);
}

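/*
 * Issue a write. Unlike reads, short or -EAGAIN writes are not retried
 * inline: the copy_iov path stashes the iovec for an async retry, taking
 * care to drop the freeze protection grabbed for regular files before
 * returning -EAGAIN.
 */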
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct iov_iter_state __state, *state;
	ssize_t ret, ret2;

	if (rw) {
		iter = &rw->iter;
		state = &rw->iter_state;
		iov_iter_restore(iter, state);
		iovec = NULL;
	} else {
		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
		state = &__state;
		iov_iter_save_state(iter, state);
	}
	req->result = iov_iter_count(iter);

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_nowait(req, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (req->file->f_op->write_iter)
		ret2 = call_write_iter(req->file, kiocb, iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
			goto copy_iov;
done:
		kiocb_done(kiocb, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(iter, state);
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				kiocb_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

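/*
 * Prep for renameat2: both path names are copied in with getname() at
 * prep time, so issue never touches user memory; REQ_F_NEED_CLEANUP
 * ensures the names are released if the request dies before issue.
 */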
static int io_renameat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = &req->rename;
	const char __user *oldf, *newf;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rename *ren = &req->rename;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_unlinkat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = &req->unlink;
	const char __user *fname;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_unlink *un = &req->unlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_mkdirat_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	struct io_mkdir *mkd = &req->mkdir;
	const char __user *fname;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
	    sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	mkd->dfd = READ_ONCE(sqe->fd);
	mkd->mode = READ_ONCE(sqe->len);

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	mkd->filename = getname(fname);
	if (IS_ERR(mkd->filename))
		return PTR_ERR(mkd->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_mkdirat(struct io_kiocb *req, int issue_flags)
{
	struct io_mkdir *mkd = &req->mkdir;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_symlinkat_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	struct io_symlink *sl = &req->symlink;
	const char __user *oldpath, *newpath;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
	    sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	sl->new_dfd = READ_ONCE(sqe->fd);
	oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));

	sl->oldpath = getname(oldpath);
	if (IS_ERR(sl->oldpath))
		return PTR_ERR(sl->oldpath);

	sl->newpath = getname(newpath);
	if (IS_ERR(sl->newpath)) {
		putname(sl->oldpath);
		return PTR_ERR(sl->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_symlinkat(struct io_kiocb *req, int issue_flags)
{
	struct io_symlink *sl = &req->symlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_linkat_prep(struct io_kiocb *req,
			  const struct io_uring_sqe *sqe)
{
	struct io_hardlink *lnk = &req->hardlink;
	const char __user *oldf, *newf;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	lnk->old_dfd = READ_ONCE(sqe->fd);
	lnk->new_dfd = READ_ONCE(sqe->len);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	lnk->flags = READ_ONCE(sqe->hardlink_flags);

	lnk->oldpath = getname(oldf);
	if (IS_ERR(lnk->oldpath))
		return PTR_ERR(lnk->oldpath);

	lnk->newpath = getname(newf);
	if (IS_ERR(lnk->newpath)) {
		putname(lnk->oldpath);
		return PTR_ERR(lnk->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_linkat(struct io_kiocb *req, int issue_flags)
{
	struct io_hardlink *lnk = &req->hardlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
				lnk->newpath, lnk->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_shutdown_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	req->shutdown.how = READ_ONCE(sqe->len);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, req->shutdown.how);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

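/*
 * Common prep for splice and tee: reads the length/flags and records the
 * input fd. The input file (possibly a fixed file, per
 * SPLICE_F_FD_IN_FIXED) is deliberately not resolved until issue time,
 * via io_file_get().
 */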
static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);
	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;
	sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	struct file *in;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	in = io_file_get(req->ctx, req, sp->splice_fd_in,
			 (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (!in) {
		ret = -EBADF;
		goto done;
	}

	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
		io_put_file(in);
done:
	if (ret != sp->len)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	struct file *in;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	in = io_file_get(req->ctx, req, sp->splice_fd_in,
			 (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (!in) {
		ret = -EBADF;
		goto done;
	}

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
		io_put_file(in);
done:
	if (ret != sp->len)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, issue_flags, 0, 0);
	return 0;
}

static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
		     sqe->splice_fd_in))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
	    sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail(req);
	else
		fsnotify_modify(req->file);
	io_req_complete(req, ret);
	return 0;
}

static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should already be initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->open.file_slot = READ_ONCE(sqe->file_index);
	if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
		return -EINVAL;

	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
				    len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

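/*
 * Issue the open. A non-blocking attempt uses LOOKUP_CACHED and
 * O_NONBLOCK so the lookup fails with -EAGAIN instead of blocking, and
 * the request is then retried from a context that may block. If a fixed
 * file slot was requested, the file is installed into the fixed table
 * rather than a normal fd.
 */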
static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!req->open.file_slot;
	int ret;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always return -EAGAIN
		 */
		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;
	fsnotify_open(file);

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_install_fixed_file(req, file, issue_flags,
					    req->open.file_slot - 1);
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

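/*
 * Prep for removing provided buffers: sqe->fd carries the number of
 * buffers to remove (1..USHRT_MAX) and sqe->buf_group the group to
 * remove them from.
 */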
static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

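/*
 * Free up to @nbufs buffers from group @bgid. The head buffer doubles as
 * the list anchor, so it is only freed, and the group erased from the
 * xarray, once the rest of the list has been consumed.
 */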
static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
		cond_resched();
	}
	i++;
	kfree(buf);
	xa_erase(&ctx->io_buffers, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = xa_load(&ctx->io_buffers, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
	if (ret < 0)
		req_set_fail(req);

	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, !force_nonblock);
	return 0;
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

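/*
 * Allocate and link nbufs buffer descriptors over the user range, with
 * ascending buffer IDs. Partial success is fine: the number actually
 * added is returned, and -ENOMEM only if not even one could be allocated.
 */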
static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
		cond_resched();
	}

	return i ? i : -ENOMEM;
}

static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = xa_load(&ctx->io_buffers, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret >= 0 && !list) {
		ret = xa_insert(&ctx->io_buffers, p->bgid, head,
				GFP_KERNEL_ACCOUNT);
		if (ret < 0)
			__io_remove_buffers(ctx, head, p->bgid, -1U);
	}
	if (ret < 0)
		req_set_fail(req);
	/* complete before unlock, IOPOLL may need the lock */
	__io_req_complete(req, issue_flags, ret, 0);
	io_ring_submit_unlock(ctx, !force_nonblock);
	return 0;
}

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

Jens Axboeeddc7ef2019-12-13 21:18:10 -07004643static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4644{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004645 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004646 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004647 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004648 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004649 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004650 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004651
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004652 req->statx.dfd = READ_ONCE(sqe->fd);
4653 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004654 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004655 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4656 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004657
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004658 return 0;
4659}
4660
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004661static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004662{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004663 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004664 int ret;
4665
Pavel Begunkov59d70012021-03-22 01:58:30 +00004666 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004667 return -EAGAIN;
4668
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004669 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4670 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004671
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004672 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004673 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004674 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004675 return 0;
4676}
4677
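/*
 * Userspace sketch for the statx opcode (assumes liburing's
 * io_uring_prep_statx() helper; note that addr carries the filename
 * and addr2 the result buffer, as read in io_statx_prep() above):
 *
 *	struct statx stx;
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_statx(sqe, AT_FDCWD, "file.txt", 0, STATX_SIZE, &stx);
 *	io_uring_submit(&ring);
 */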
Jens Axboeb5dba592019-12-11 14:02:38 -07004678static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4679{
Jens Axboe14587a462020-09-05 11:36:08 -06004680 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004681 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004682 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004683 sqe->rw_flags || sqe->buf_index)
Jens Axboeb5dba592019-12-11 14:02:38 -07004684 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004685 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004686 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004687
4688 req->close.fd = READ_ONCE(sqe->fd);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004689 req->close.file_slot = READ_ONCE(sqe->file_index);
4690 if (req->close.file_slot && req->close.fd)
4691 return -EINVAL;
4692
Jens Axboeb5dba592019-12-11 14:02:38 -07004693 return 0;
4694}
4695
Pavel Begunkov889fca72021-02-10 00:03:09 +00004696static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004697{
Jens Axboe9eac1902021-01-19 15:50:37 -07004698 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004699 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004700 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004701 struct file *file = NULL;
4702 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004703
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004704 if (req->close.file_slot) {
4705 ret = io_close_fixed(req, issue_flags);
4706 goto err;
4707 }
4708
Jens Axboe9eac1902021-01-19 15:50:37 -07004709 spin_lock(&files->file_lock);
4710 fdt = files_fdtable(files);
4711 if (close->fd >= fdt->max_fds) {
4712 spin_unlock(&files->file_lock);
4713 goto err;
4714 }
4715 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004716 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004717 spin_unlock(&files->file_lock);
4718 file = NULL;
4719 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004720 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004721
4722 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004723 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004724 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004725 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004726 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004727
Jens Axboe9eac1902021-01-19 15:50:37 -07004728 ret = __close_fd_get_file(close->fd, &file);
4729 spin_unlock(&files->file_lock);
4730 if (ret < 0) {
4731 if (ret == -ENOENT)
4732 ret = -EBADF;
4733 goto err;
4734 }
4735
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004736 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004737 ret = filp_close(file, current->files);
4738err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004739 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004740 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004741 if (file)
4742 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004743 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004744 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004745}
4746
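/*
 * Userspace sketch for the close opcode (assumes liburing's
 * io_uring_prep_close() helper; recent liburing also provides
 * io_uring_prep_close_direct() for closing a fixed file slot via
 * sqe->file_index, matching close->file_slot above):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_close(sqe, fd);
 *	io_uring_submit(&ring);
 */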
Pavel Begunkov1155c762021-02-18 18:29:38 +00004747static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004748{
4749 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004750
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004751 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4752 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004753 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4754 sqe->splice_fd_in))
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004755 return -EINVAL;
4756
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004757 req->sync.off = READ_ONCE(sqe->off);
4758 req->sync.len = READ_ONCE(sqe->len);
4759 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004760 return 0;
4761}
4762
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004763static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004764{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004765 int ret;
4766
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004767 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004768 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004769 return -EAGAIN;
4770
Jens Axboe9adbd452019-12-20 08:45:55 -07004771 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004772 req->sync.flags);
4773 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004774 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004775 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004776 return 0;
4777}
4778
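/*
 * Userspace sketch for the sync_file_range opcode (assumes liburing's
 * io_uring_prep_sync_file_range() helper; as noted above, the request
 * always executes from a blocking context):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sync_file_range(sqe, fd, len, offset, 0);
 *	io_uring_submit(&ring);
 */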
YueHaibing469956e2020-03-04 15:53:52 +08004779#if defined(CONFIG_NET)
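/*
 * Stash the msghdr state into req->async_data so a blocking retry can
 * be punted to io-wq: the caller's on-stack copy used for the fast
 * nonblocking attempt is gone by the time the request is re-issued.
 * Always returns -EAGAIN (or -ENOMEM) so the caller requeues the
 * request.
 */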
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004780static int io_setup_async_msg(struct io_kiocb *req,
4781 struct io_async_msghdr *kmsg)
4782{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004783 struct io_async_msghdr *async_msg = req->async_data;
4784
4785 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004786 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004787 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004788 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004789 return -ENOMEM;
4790 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004791 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004792 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004793 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkovf9dc33f2022-09-29 22:23:18 +01004794 if (async_msg->msg.msg_name)
4795 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004796	/* if we're using fast_iov, set it to the new one */
4797 if (!async_msg->free_iov)
4798 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4799
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004800 return -EAGAIN;
4801}
4802
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004803static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4804 struct io_async_msghdr *iomsg)
4805{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004806 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004807 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004808 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004809 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004810}
4811
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004812static int io_sendmsg_prep_async(struct io_kiocb *req)
4813{
4814 int ret;
4815
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004816 ret = io_sendmsg_copy_hdr(req, req->async_data);
4817 if (!ret)
4818 req->flags |= REQ_F_NEED_CLEANUP;
4819 return ret;
4820}
4821
Jens Axboe3529d8c2019-12-19 18:24:38 -07004822static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004823{
Jens Axboee47293f2019-12-20 08:58:21 -07004824 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004825
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004826 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4827 return -EINVAL;
Jens Axboe50fefe52022-06-30 14:42:05 -06004830	if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
	4831		return -EINVAL;
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004832
Pavel Begunkov270a5942020-07-12 20:41:04 +03004833 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004834 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004835 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4836 if (sr->msg_flags & MSG_DONTWAIT)
4837 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004838
Jens Axboed8768362020-02-27 14:17:49 -07004839#ifdef CONFIG_COMPAT
4840 if (req->ctx->compat)
4841 sr->msg_flags |= MSG_CMSG_COMPAT;
4842#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004843 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004844}
4845
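/*
 * Userspace sketch for the sendmsg opcode (assumes liburing's
 * io_uring_prep_sendmsg() helper; iov/buf setup elided):
 *
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 */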
Pavel Begunkov889fca72021-02-10 00:03:09 +00004846static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004847{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004848 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004849 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004850 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004851 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004852 int ret;
4853
Florent Revestdba4a922020-12-04 12:36:04 +01004854 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004855 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004856 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004857
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004858 kmsg = req->async_data;
4859 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004860 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004861 if (ret)
4862 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004863 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004864 }
4865
Pavel Begunkov04411802021-04-01 15:44:00 +01004866 flags = req->sr_msg.msg_flags;
4867 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004868 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004869 if (flags & MSG_WAITALL)
4870 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4871
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004872 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004873
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00004874 if (ret < min_ret) {
4875 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
4876 return io_setup_async_msg(req, kmsg);
4877 if (ret == -ERESTARTSYS)
4878 ret = -EINTR;
4879 req_set_fail(req);
4880 }
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004881 /* fast path, check for non-NULL to avoid function call */
4882 if (kmsg->free_iov)
4883 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004884 req->flags &= ~REQ_F_NEED_CLEANUP;
Pavel Begunkov889fca72021-02-10 00:03:09 +00004885 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004886 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004887}
4888
Pavel Begunkov889fca72021-02-10 00:03:09 +00004889static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004890{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004891 struct io_sr_msg *sr = &req->sr_msg;
4892 struct msghdr msg;
4893 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004894 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004895 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004896 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004897 int ret;
4898
Florent Revestdba4a922020-12-04 12:36:04 +01004899 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004900 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004901 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004902
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004903 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4904 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004905 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004906
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004907 msg.msg_name = NULL;
4908 msg.msg_control = NULL;
4909 msg.msg_controllen = 0;
4910 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004911
Pavel Begunkov04411802021-04-01 15:44:00 +01004912 flags = req->sr_msg.msg_flags;
4913 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004914 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004915 if (flags & MSG_WAITALL)
4916 min_ret = iov_iter_count(&msg.msg_iter);
4917
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004918 msg.msg_flags = flags;
4919 ret = sock_sendmsg(sock, &msg);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00004920 if (ret < min_ret) {
4921 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
4922 return -EAGAIN;
4923 if (ret == -ERESTARTSYS)
4924 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004925 req_set_fail(req);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00004926 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004927 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004928 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004929}
4930
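/*
 * Userspace sketch for the plain send opcode, which takes a single
 * buffer instead of a msghdr (assumes liburing's io_uring_prep_send()
 * helper):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_send(sqe, sockfd, buf, buf_len, 0);
 *	io_uring_submit(&ring);
 */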
Pavel Begunkov1400e692020-07-12 20:41:05 +03004931static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4932 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004933{
4934 struct io_sr_msg *sr = &req->sr_msg;
4935 struct iovec __user *uiov;
4936 size_t iov_len;
4937 int ret;
4938
Pavel Begunkov1400e692020-07-12 20:41:05 +03004939 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4940 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004941 if (ret)
4942 return ret;
4943
4944 if (req->flags & REQ_F_BUFFER_SELECT) {
4945 if (iov_len > 1)
4946 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004947 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004948 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004949 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004950 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004951 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004952 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004953 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004954 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004955 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004956 if (ret > 0)
4957 ret = 0;
4958 }
4959
4960 return ret;
4961}
4962
4963#ifdef CONFIG_COMPAT
4964static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004965 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004966{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004967 struct io_sr_msg *sr = &req->sr_msg;
4968 struct compat_iovec __user *uiov;
4969 compat_uptr_t ptr;
4970 compat_size_t len;
4971 int ret;
4972
Pavel Begunkov4af34172021-04-11 01:46:30 +01004973 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4974 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004975 if (ret)
4976 return ret;
4977
4978 uiov = compat_ptr(ptr);
4979 if (req->flags & REQ_F_BUFFER_SELECT) {
4980 compat_ssize_t clen;
4981
4982 if (len > 1)
4983 return -EINVAL;
4984 if (!access_ok(uiov, sizeof(*uiov)))
4985 return -EFAULT;
4986 if (__get_user(clen, &uiov->iov_len))
4987 return -EFAULT;
4988 if (clen < 0)
4989 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004990 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004991 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004992 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004993 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004994 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004995 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004996 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004997 if (ret < 0)
4998 return ret;
4999 }
5000
5001 return 0;
5002}
Jens Axboe03b12302019-12-02 18:50:25 -07005003#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07005004
Pavel Begunkov1400e692020-07-12 20:41:05 +03005005static int io_recvmsg_copy_hdr(struct io_kiocb *req,
5006 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07005007{
Pavel Begunkov1400e692020-07-12 20:41:05 +03005008 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005009
5010#ifdef CONFIG_COMPAT
5011 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03005012 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005013#endif
5014
Pavel Begunkov1400e692020-07-12 20:41:05 +03005015 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005016}
5017
Jens Axboebcda7ba2020-02-23 16:42:51 -07005018static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005019 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07005020{
5021 struct io_sr_msg *sr = &req->sr_msg;
5022 struct io_buffer *kbuf;
5023
Jens Axboebcda7ba2020-02-23 16:42:51 -07005024 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
5025 if (IS_ERR(kbuf))
5026 return kbuf;
5027
5028 sr->kbuf = kbuf;
5029 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005030 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07005031}
5032
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005033static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
5034{
5035 return io_put_kbuf(req, req->sr_msg.kbuf);
5036}
5037
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005038static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07005039{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005040 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07005041
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005042 ret = io_recvmsg_copy_hdr(req, req->async_data);
5043 if (!ret)
5044 req->flags |= REQ_F_NEED_CLEANUP;
5045 return ret;
5046}
5047
5048static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5049{
5050 struct io_sr_msg *sr = &req->sr_msg;
5051
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03005052 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5053 return -EINVAL;
Jens Axboe50fefe52022-06-30 14:42:05 -06005056	if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
	5057		return -EINVAL;
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03005058
Pavel Begunkov270a5942020-07-12 20:41:04 +03005059 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07005060 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005061 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01005062 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5063 if (sr->msg_flags & MSG_DONTWAIT)
5064 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07005065
Jens Axboed8768362020-02-27 14:17:49 -07005066#ifdef CONFIG_COMPAT
5067 if (req->ctx->compat)
5068 sr->msg_flags |= MSG_CMSG_COMPAT;
5069#endif
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005070 sr->done_io = 0;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005071 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07005072}
5073
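/*
 * Under MSG_WAITALL a short transfer on a stream-type socket is worth
 * retrying rather than completing short: the caller asked for the full
 * amount, and byte ordering is preserved, so the remainder can simply
 * be appended on re-issue (sr->done_io tracks what's been done so far).
 */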
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005074static bool io_net_retry(struct socket *sock, int flags)
5075{
5076 if (!(flags & MSG_WAITALL))
5077 return false;
5078 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
5079}
5080
Pavel Begunkov889fca72021-02-10 00:03:09 +00005081static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07005082{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005083 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005084 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005085 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005086 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005087 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005088 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005089 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005090 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005091
Florent Revestdba4a922020-12-04 12:36:04 +01005092 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005093 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005094 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005095
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005096 kmsg = req->async_data;
5097 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005098 ret = io_recvmsg_copy_hdr(req, &iomsg);
5099 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03005100 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005101 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005102 }
5103
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005104 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005105 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005106 if (IS_ERR(kbuf))
5107 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005108 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00005109 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5110 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005111 1, req->sr_msg.len);
5112 }
5113
Pavel Begunkov04411802021-04-01 15:44:00 +01005114 flags = req->sr_msg.msg_flags;
5115 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005116 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005117 if (flags & MSG_WAITALL)
5118 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5119
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005120 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5121 kmsg->uaddr, flags);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005122 if (ret < min_ret) {
5123 if (ret == -EAGAIN && force_nonblock)
5124 return io_setup_async_msg(req, kmsg);
5125 if (ret == -ERESTARTSYS)
5126 ret = -EINTR;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005127 if (ret > 0 && io_net_retry(sock, flags)) {
5128 sr->done_io += ret;
Jens Axboe390b8812022-03-23 09:30:05 -06005129 req->flags |= REQ_F_PARTIAL_IO;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005130 return io_setup_async_msg(req, kmsg);
5131 }
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005132 req_set_fail(req);
5133 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
5134 req_set_fail(req);
5135 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005136
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005137 if (req->flags & REQ_F_BUFFER_SELECTED)
5138 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005139 /* fast path, check for non-NULL to avoid function call */
5140 if (kmsg->free_iov)
5141 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005142 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005143 if (ret >= 0)
5144 ret += sr->done_io;
5145 else if (sr->done_io)
5146 ret = sr->done_io;
Pavel Begunkov889fca72021-02-10 00:03:09 +00005147 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06005148 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005149}
5150
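/*
 * Userspace sketch for the recvmsg opcode (assumes liburing's
 * io_uring_prep_recvmsg() helper; provided buffers via
 * IOSQE_BUFFER_SELECT are registered separately and not shown):
 *
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recvmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 */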
Pavel Begunkov889fca72021-02-10 00:03:09 +00005151static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07005152{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005153 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005154 struct io_sr_msg *sr = &req->sr_msg;
5155 struct msghdr msg;
5156 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07005157 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005158 struct iovec iov;
5159 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005160 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005161 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005162 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005163
Florent Revestdba4a922020-12-04 12:36:04 +01005164 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005165 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005166 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005167
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005168 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005169 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005170 if (IS_ERR(kbuf))
5171 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005172 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07005173 }
5174
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005175 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005176 if (unlikely(ret))
5177 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07005178
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005179 msg.msg_name = NULL;
5180 msg.msg_control = NULL;
5181 msg.msg_controllen = 0;
5182 msg.msg_namelen = 0;
5183 msg.msg_iocb = NULL;
5184 msg.msg_flags = 0;
5185
Pavel Begunkov04411802021-04-01 15:44:00 +01005186 flags = req->sr_msg.msg_flags;
5187 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005188 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005189 if (flags & MSG_WAITALL)
5190 min_ret = iov_iter_count(&msg.msg_iter);
5191
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005192 ret = sock_recvmsg(sock, &msg, flags);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005193out_free:
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005194 if (ret < min_ret) {
5195 if (ret == -EAGAIN && force_nonblock)
5196 return -EAGAIN;
5197 if (ret == -ERESTARTSYS)
5198 ret = -EINTR;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005199 if (ret > 0 && io_net_retry(sock, flags)) {
5200 sr->len -= ret;
5201 sr->buf += ret;
5202 sr->done_io += ret;
Jens Axboe390b8812022-03-23 09:30:05 -06005203 req->flags |= REQ_F_PARTIAL_IO;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005204 return -EAGAIN;
5205 }
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005206 req_set_fail(req);
5207 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
5208 req_set_fail(req);
5209 }
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005210 if (req->flags & REQ_F_BUFFER_SELECTED)
5211 cflags = io_put_recv_kbuf(req);
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005212 if (ret >= 0)
5213 ret += sr->done_io;
5214 else if (sr->done_io)
5215 ret = sr->done_io;
Pavel Begunkov889fca72021-02-10 00:03:09 +00005216 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07005217 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07005218}
5219
Jens Axboe3529d8c2019-12-19 18:24:38 -07005220static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005221{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005222 struct io_accept *accept = &req->accept;
5223
Jens Axboe14587a462020-09-05 11:36:08 -06005224 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06005225 return -EINVAL;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005226 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005227 return -EINVAL;
5228
Jens Axboed55e5f52019-12-11 16:12:15 -07005229 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5230 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005231 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06005232 accept->nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005233
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005234 accept->file_slot = READ_ONCE(sqe->file_index);
Jens Axboe13239762022-03-14 17:26:19 -06005235 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005236 return -EINVAL;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005237 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5238 return -EINVAL;
5239 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5240 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005241 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005242}
Jens Axboe17f2fe32019-10-17 14:42:58 -06005243
Pavel Begunkov889fca72021-02-10 00:03:09 +00005244static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005245{
5246 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005247 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005248 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005249 bool fixed = !!accept->file_slot;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005250 struct file *file;
5251 int ret, fd;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005252
Jiufei Xuee697dee2020-06-10 13:41:59 +08005253 if (req->file->f_flags & O_NONBLOCK)
5254 req->flags |= REQ_F_NOWAIT;
5255
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005256 if (!fixed) {
5257 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5258 if (unlikely(fd < 0))
5259 return fd;
5260 }
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005261 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5262 accept->flags);
5263 if (IS_ERR(file)) {
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005264 if (!fixed)
5265 put_unused_fd(fd);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005266 ret = PTR_ERR(file);
5267 if (ret == -EAGAIN && force_nonblock)
5268 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005269 if (ret == -ERESTARTSYS)
5270 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005271 req_set_fail(req);
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005272 } else if (!fixed) {
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005273 fd_install(fd, file);
5274 ret = fd;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005275 } else {
5276 ret = io_install_fixed_file(req, file, issue_flags,
5277 accept->file_slot - 1);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005278 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00005279 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06005280 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005281}
5282
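/*
 * Userspace sketch for the accept opcode (assumes liburing's
 * io_uring_prep_accept() helper; recent liburing also offers
 * io_uring_prep_accept_direct(), which fills a fixed file slot via
 * sqe->file_index instead of installing a new fd):
 *
 *	struct sockaddr_storage ss;
 *	socklen_t len = sizeof(ss);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&ss, &len, 0);
 *	io_uring_submit(&ring);
 */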
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005283static int io_connect_prep_async(struct io_kiocb *req)
5284{
5285 struct io_async_connect *io = req->async_data;
5286 struct io_connect *conn = &req->connect;
5287
5288 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5289}
5290
Jens Axboe3529d8c2019-12-19 18:24:38 -07005291static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07005292{
Jens Axboe3529d8c2019-12-19 18:24:38 -07005293 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07005294
Jens Axboe14587a462020-09-05 11:36:08 -06005295 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005296 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005297 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5298 sqe->splice_fd_in)
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005299 return -EINVAL;
5300
Jens Axboe3529d8c2019-12-19 18:24:38 -07005301 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5302 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005303 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07005304}
5305
Pavel Begunkov889fca72021-02-10 00:03:09 +00005306static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07005307{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005308 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005309 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005310 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005311 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005312
Jens Axboee8c2bc12020-08-15 18:44:09 -07005313 if (req->async_data) {
5314 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07005315 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07005316 ret = move_addr_to_kernel(req->connect.addr,
5317 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07005318 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07005319 if (ret)
5320 goto out;
5321 io = &__io;
5322 }
5323
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005324 file_flags = force_nonblock ? O_NONBLOCK : 0;
5325
Jens Axboee8c2bc12020-08-15 18:44:09 -07005326 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005327 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07005328 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07005329 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07005330 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005331 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07005332 ret = -ENOMEM;
5333 goto out;
5334 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07005335 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07005336 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07005337 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07005338 if (ret == -ERESTARTSYS)
5339 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07005340out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005341 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005342 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005343 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005344 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005345}
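/*
 * Userspace sketch for the connect opcode (assumes liburing's
 * io_uring_prep_connect() helper; address setup abbreviated):
 *
 *	struct sockaddr_in sa = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(80),
 *	};
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&sa, sizeof(sa));
 *	io_uring_submit(&ring);
 */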
YueHaibing469956e2020-03-04 15:53:52 +08005346#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07005347#define IO_NETOP_FN(op) \
5348static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5349{ \
5350 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07005351}
5352
Jens Axboe99a10082021-02-19 09:35:19 -07005353#define IO_NETOP_PREP(op) \
5354IO_NETOP_FN(op) \
5355static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5356{ \
5357 return -EOPNOTSUPP; \
5358} \
5359
5360#define IO_NETOP_PREP_ASYNC(op) \
5361IO_NETOP_PREP(op) \
5362static int io_##op##_prep_async(struct io_kiocb *req) \
5363{ \
5364 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08005365}
5366
Jens Axboe99a10082021-02-19 09:35:19 -07005367IO_NETOP_PREP_ASYNC(sendmsg);
5368IO_NETOP_PREP_ASYNC(recvmsg);
5369IO_NETOP_PREP_ASYNC(connect);
5370IO_NETOP_PREP(accept);
5371IO_NETOP_FN(send);
5372IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08005373#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06005374
Jens Axboed7718a92020-02-14 22:23:12 -07005375struct io_poll_table {
5376 struct poll_table_struct pt;
5377 struct io_kiocb *req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005378 int nr_entries;
Jens Axboed7718a92020-02-14 22:23:12 -07005379 int error;
5380};
5381
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005382#define IO_POLL_CANCEL_FLAG BIT(31)
Pavel Begunkov4b702b72022-12-02 14:27:14 +00005383#define IO_POLL_RETRY_FLAG BIT(30)
5384#define IO_POLL_REF_MASK GENMASK(29, 0)
5385
5386/*
	5387 * We usually have 1-2 refs taken; 128 is more than enough, and we want to
	5388 * maximise the margin between this amount and the point where it overflows.
5389 */
5390#define IO_POLL_REF_BIAS 128
5391
5392static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
5393{
5394 int v;
5395
5396 /*
5397 * poll_refs are already elevated and we don't have much hope for
	5398	 * grabbing ownership. Instead of incrementing, set a retry flag to
	5399	 * notify the loop that there might have been some change.
5400 */
5401 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
5402 if (v & IO_POLL_REF_MASK)
5403 return false;
5404 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5405}
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005406
5407/*
	5408 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free:
	5409 * we can bump it and acquire ownership. It's disallowed to modify requests
	5410 * while not owning it, which prevents races both when enqueueing task_work
	5411 * and between arming poll and wakeups.
5412 */
5413static inline bool io_poll_get_ownership(struct io_kiocb *req)
5414{
Pavel Begunkov4b702b72022-12-02 14:27:14 +00005415 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
5416 return io_poll_get_ownership_slowpath(req);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005417 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5418}
5419
5420static void io_poll_mark_cancelled(struct io_kiocb *req)
5421{
5422 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
5423}
5424
Pavel Begunkova85d7ac2022-08-29 14:30:15 +01005425static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
5426{
	5427	/* pure poll stashes this in ->async_data, poll-driven retry elsewhere */
5428 if (req->opcode == IORING_OP_POLL_ADD)
5429 return req->async_data;
5430 return req->apoll->double_poll;
5431}
5432
5433static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5434{
5435 if (req->opcode == IORING_OP_POLL_ADD)
5436 return &req->poll;
5437 return &req->apoll->poll;
5438}
5439
5440static void io_poll_req_insert(struct io_kiocb *req)
5441{
5442 struct io_ring_ctx *ctx = req->ctx;
5443 struct hlist_head *list;
5444
5445 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5446 hlist_add_head(&req->hash_node, list);
5447}
5448
5449static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5450 wait_queue_func_t wake_func)
5451{
5452 poll->head = NULL;
Pavel Begunkova85d7ac2022-08-29 14:30:15 +01005453#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5454 /* mask in events that we always want/need */
5455 poll->events = events | IO_POLL_UNMASK;
5456 INIT_LIST_HEAD(&poll->wait.entry);
5457 init_waitqueue_func_entry(&poll->wait, wake_func);
5458}
5459
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005460static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
Jens Axboed7718a92020-02-14 22:23:12 -07005461{
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005462 struct wait_queue_head *head = smp_load_acquire(&poll->head);
Jens Axboed7718a92020-02-14 22:23:12 -07005463
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005464 if (head) {
5465 spin_lock_irq(&head->lock);
5466 list_del_init(&poll->wait.entry);
5467 poll->head = NULL;
5468 spin_unlock_irq(&head->lock);
5469 }
Jens Axboed7718a92020-02-14 22:23:12 -07005470}
5471
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005472static void io_poll_remove_entries(struct io_kiocb *req)
5473{
5474 struct io_poll_iocb *poll = io_poll_get_single(req);
5475 struct io_poll_iocb *poll_double = io_poll_get_double(req);
5476
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005477 /*
5478 * While we hold the waitqueue lock and the waitqueue is nonempty,
5479 * wake_up_pollfree() will wait for us. However, taking the waitqueue
5480 * lock in the first place can race with the waitqueue being freed.
5481 *
5482 * We solve this as eventpoll does: by taking advantage of the fact that
5483 * all users of wake_up_pollfree() will RCU-delay the actual free. If
5484 * we enter rcu_read_lock() and see that the pointer to the queue is
5485 * non-NULL, we can then lock it without the memory being freed out from
5486 * under us.
5487 *
5488 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
5489 * case the caller deletes the entry from the queue, leaving it empty.
5490 * In that case, only RCU prevents the queue memory from being freed.
5491 */
5492 rcu_read_lock();
5493 io_poll_remove_entry(poll);
5494 if (poll_double)
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005495 io_poll_remove_entry(poll_double);
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005496 rcu_read_unlock();
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005497}
5498
5499/*
5500 * All poll tw should go through this. Checks for poll events, manages
5501 * references, does rewait, etc.
5502 *
	5503	 * Returns a negative error on failure. >0 when no action is required, which
	5504	 * means either a spurious wakeup or a served multishot CQE. 0 when it's done
	5505	 * with the request, in which case the mask is stored in req->result.
5506 */
5507static int io_poll_check_events(struct io_kiocb *req)
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005508{
5509 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005510 struct io_poll_iocb *poll = io_poll_get_single(req);
5511 int v;
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005512
Jens Axboe316319e2021-08-19 09:41:42 -06005513 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkove09ee512021-07-01 13:26:05 +01005514 if (unlikely(req->task->flags & PF_EXITING))
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005515 io_poll_mark_cancelled(req);
Pavel Begunkove09ee512021-07-01 13:26:05 +01005516
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005517 do {
5518 v = atomic_read(&req->poll_refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005519
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005520 /* tw handler should be the owner, and so have some references */
5521 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
5522 return 0;
5523 if (v & IO_POLL_CANCEL_FLAG)
5524 return -ECANCELED;
Pavel Begunkovcd1981a2022-12-02 14:27:12 +00005525 /*
	5526		 * cqe.res contains only the events of the first wake up;
	5527		 * all others are lost. Redo vfs_poll() to get the
	5528		 * up to date state.
5529 */
5530 if ((v & IO_POLL_REF_MASK) != 1)
5531 req->result = 0;
Pavel Begunkov4b702b72022-12-02 14:27:14 +00005532 if (v & IO_POLL_RETRY_FLAG) {
5533 req->result = 0;
5534 /*
5535 * We won't find new events that came in between
5536 * vfs_poll and the ref put unless we clear the
5537 * flag in advance.
5538 */
5539 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
5540 v &= ~IO_POLL_RETRY_FLAG;
5541 }
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005542
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005543 if (!req->result) {
5544 struct poll_table_struct pt = { ._key = poll->events };
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005545
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005546 req->result = vfs_poll(req->file, &pt) & poll->events;
5547 }
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005548
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005549		/* multishot, just fill a CQE and proceed */
5550 if (req->result && !(poll->events & EPOLLONESHOT)) {
5551 __poll_t mask = mangle_poll(req->result & poll->events);
5552 bool filled;
Jens Axboe18bceab2020-05-15 11:56:54 -06005553
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005554 spin_lock(&ctx->completion_lock);
5555 filled = io_fill_cqe_aux(ctx, req->user_data, mask,
5556 IORING_CQE_F_MORE);
5557 io_commit_cqring(ctx);
5558 spin_unlock(&ctx->completion_lock);
5559 if (unlikely(!filled))
5560 return -ECANCELED;
5561 io_cqring_ev_posted(ctx);
5562 } else if (req->result) {
5563 return 0;
5564 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005565
Pavel Begunkov62321dc2022-12-02 14:27:11 +00005566 /* force the next iteration to vfs_poll() */
5567 req->result = 0;
5568
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005569 /*
5570 * Release all references, retry if someone tried to restart
5571 * task_work while we were executing it.
5572 */
Lin Madf4b1772022-12-02 14:27:15 +00005573 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
5574 IO_POLL_REF_MASK);
Jens Axboe18bceab2020-05-15 11:56:54 -06005575
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005576 return 1;
Jens Axboe18bceab2020-05-15 11:56:54 -06005577}
5578
Pavel Begunkovf237c302021-08-18 12:42:46 +01005579static void io_poll_task_func(struct io_kiocb *req, bool *locked)
Jens Axboe18bceab2020-05-15 11:56:54 -06005580{
Jens Axboe6d816e02020-08-11 08:04:14 -06005581 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005582 int ret;
Jens Axboe18bceab2020-05-15 11:56:54 -06005583
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005584 ret = io_poll_check_events(req);
5585 if (ret > 0)
5586 return;
5587
5588 if (!ret) {
5589 req->result = mangle_poll(req->result & req->poll.events);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005590 } else {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005591 req->result = ret;
5592 req_set_fail(req);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005593 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005594
5595 io_poll_remove_entries(req);
5596 spin_lock(&ctx->completion_lock);
5597 hash_del(&req->hash_node);
5598 spin_unlock(&ctx->completion_lock);
5599 io_req_complete_post(req, req->result, 0);
Jens Axboe18bceab2020-05-15 11:56:54 -06005600}
5601
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005602static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
5603{
5604 struct io_ring_ctx *ctx = req->ctx;
5605 int ret;
5606
5607 ret = io_poll_check_events(req);
5608 if (ret > 0)
5609 return;
5610
5611 io_poll_remove_entries(req);
5612 spin_lock(&ctx->completion_lock);
5613 hash_del(&req->hash_node);
5614 spin_unlock(&ctx->completion_lock);
5615
5616 if (!ret)
5617 io_req_task_submit(req, locked);
5618 else
5619 io_req_complete_failed(req, ret);
5620}
5621
5622static void __io_poll_execute(struct io_kiocb *req, int mask)
5623{
5624 req->result = mask;
5625 if (req->opcode == IORING_OP_POLL_ADD)
5626 req->io_task_work.func = io_poll_task_func;
5627 else
5628 req->io_task_work.func = io_apoll_task_func;
5629
5630 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5631 io_req_task_work_add(req);
5632}
5633
5634static inline void io_poll_execute(struct io_kiocb *req, int res)
5635{
5636 if (io_poll_get_ownership(req))
5637 __io_poll_execute(req, res);
5638}
5639
5640static void io_poll_cancel_req(struct io_kiocb *req)
5641{
5642 io_poll_mark_cancelled(req);
5643 /* kick tw, which should complete the request */
5644 io_poll_execute(req, 0);
5645}
5646
5647static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5648 void *key)
Jens Axboe18bceab2020-05-15 11:56:54 -06005649{
5650 struct io_kiocb *req = wait->private;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005651 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
5652 wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005653 __poll_t mask = key_to_poll(key);
5654
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005655 if (unlikely(mask & POLLFREE)) {
5656 io_poll_mark_cancelled(req);
5657 /* we have to kick tw in case it's not already */
5658 io_poll_execute(req, 0);
5659
	5660		 * If the waitqueue is being freed early but someone already
	5661		 * holds ownership over it, we have to tear down the request as
5662 * holds ownership over it, we have to tear down the request as
5663 * best we can. That means immediately removing the request from
5664 * its waitqueue and preventing all further accesses to the
5665 * waitqueue via the request.
5666 */
5667 list_del_init(&poll->wait.entry);
5668
5669 /*
5670 * Careful: this *must* be the last step, since as soon
5671 * as req->head is NULL'ed out, the request can be
5672 * completed and freed, since aio_poll_complete_work()
5673 * will no longer need to take the waitqueue lock.
5674 */
5675 smp_store_release(&poll->head, NULL);
5676 return 1;
5677 }
5678
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005679	/* for instances that support it, check for an event match first */
Jens Axboe18bceab2020-05-15 11:56:54 -06005680 if (mask && !(mask & poll->events))
5681 return 0;
5682
Jens Axboeccf06b52022-12-23 07:04:49 -07005683 if (io_poll_get_ownership(req)) {
5684 /*
5685 * If we trigger a multishot poll off our own wakeup path,
5686 * disable multishot as there is a circular dependency between
5687 * CQ posting and triggering the event.
5688 */
5689 if (mask & EPOLL_URING_WAKE)
5690 poll->events |= EPOLLONESHOT;
5691
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005692 __io_poll_execute(req, mask);
Jens Axboeccf06b52022-12-23 07:04:49 -07005693 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005694 return 1;
5695}
5696
Jens Axboe18bceab2020-05-15 11:56:54 -06005697static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005698 struct wait_queue_head *head,
5699 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005700{
5701 struct io_kiocb *req = pt->req;
5702
5703 /*
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005704 * The file being polled uses multiple waitqueues for poll handling
	5705	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5706 * if this happens.
Jens Axboe18bceab2020-05-15 11:56:54 -06005707 */
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005708 if (unlikely(pt->nr_entries)) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005709 struct io_poll_iocb *first = poll;
Pavel Begunkov58852d42020-10-16 20:55:56 +01005710
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005711 /* double add on the same waitqueue head, ignore */
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005712 if (first->head == head)
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005713 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005714 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005715 if (*poll_ptr) {
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005716 if ((*poll_ptr)->head == head)
5717 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005718 pt->error = -EINVAL;
5719 return;
5720 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005721
Jens Axboe18bceab2020-05-15 11:56:54 -06005722 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5723 if (!poll) {
5724 pt->error = -ENOMEM;
5725 return;
5726 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005727 io_init_poll_iocb(poll, first->events, first->wait.func);
Jens Axboe807abcb2020-07-17 17:09:27 -06005728 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005729 }
5730
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005731 pt->nr_entries++;
Jens Axboe18bceab2020-05-15 11:56:54 -06005732 poll->head = head;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005733 poll->wait.private = req;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005734
5735 if (poll->events & EPOLLEXCLUSIVE)
5736 add_wait_queue_exclusive(head, &poll->wait);
5737 else
5738 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005739}
5740
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005741static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5742 struct poll_table_struct *p)
5743{
5744 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5745
5746 __io_queue_proc(&pt->req->poll, pt, head,
5747 (struct io_poll_iocb **) &pt->req->async_data);
5748}
5749
5750static int __io_arm_poll_handler(struct io_kiocb *req,
5751 struct io_poll_iocb *poll,
5752 struct io_poll_table *ipt, __poll_t mask)
5753{
5754 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005755
5756 INIT_HLIST_NODE(&req->hash_node);
5757 io_init_poll_iocb(poll, mask, io_poll_wake);
5758 poll->file = req->file;
5759 poll->wait.private = req;
5760
5761 ipt->pt._key = mask;
5762 ipt->req = req;
5763 ipt->error = 0;
5764 ipt->nr_entries = 0;
5765
5766 /*
5767 * Take the ownership to delay any tw execution up until we're done
	5768	 * with poll arming; see io_poll_get_ownership().
5769 */
5770 atomic_set(&req->poll_refs, 1);
5771 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5772
5773 if (mask && (poll->events & EPOLLONESHOT)) {
5774 io_poll_remove_entries(req);
5775 /* no one else has access to the req, forget about the ref */
5776 return mask;
5777 }
5778 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
5779 io_poll_remove_entries(req);
5780 if (!ipt->error)
5781 ipt->error = -EINVAL;
5782 return 0;
5783 }
5784
5785 spin_lock(&ctx->completion_lock);
5786 io_poll_req_insert(req);
5787 spin_unlock(&ctx->completion_lock);
5788
5789 if (mask) {
5790 /* can't multishot if failed, just queue the event we've got */
Pavel Begunkov182dc3a2022-08-29 14:30:23 +01005791 if (unlikely(ipt->error || !ipt->nr_entries)) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005792 poll->events |= EPOLLONESHOT;
Pavel Begunkov182dc3a2022-08-29 14:30:23 +01005793 ipt->error = 0;
5794 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005795 __io_poll_execute(req, mask);
5796 return 0;
5797 }
5798
5799 /*
Pavel Begunkov1d588492022-12-02 14:27:13 +00005800	 * Try to release ownership. If we see a change of state, e.g. the
	5801	 * poll was woken up, queue up a tw; it'll deal with it.
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005802 */
Pavel Begunkov1d588492022-12-02 14:27:13 +00005803 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005804 __io_poll_execute(req, 0);
5805 return 0;
5806}
5807
Jens Axboe18bceab2020-05-15 11:56:54 -06005808static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5809 struct poll_table_struct *p)
5810{
5811 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005812 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005813
Jens Axboe807abcb2020-07-17 17:09:27 -06005814 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005815}
5816
Olivier Langlois59b735a2021-06-22 05:17:39 -07005817enum {
5818 IO_APOLL_OK,
5819 IO_APOLL_ABORTED,
5820 IO_APOLL_READY
5821};
5822
5823static int io_arm_poll_handler(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005824{
5825 const struct io_op_def *def = &io_op_defs[req->opcode];
5826 struct io_ring_ctx *ctx = req->ctx;
5827 struct async_poll *apoll;
5828 struct io_poll_table ipt;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005829 __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
5830 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07005831
5832 if (!req->file || !file_can_poll(req->file))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005833 return IO_APOLL_ABORTED;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005834 if (req->flags & REQ_F_POLLED)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005835 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005836 if (!def->pollin && !def->pollout)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005837 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005838
5839 if (def->pollin) {
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005840 mask |= POLLIN | POLLRDNORM;
5841
5842 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5843 if ((req->opcode == IORING_OP_RECVMSG) &&
5844 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5845 mask &= ~POLLIN;
5846 } else {
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005847 mask |= POLLOUT | POLLWRNORM;
5848 }
5849
Jens Axboed7718a92020-02-14 22:23:12 -07005850 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5851 if (unlikely(!apoll))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005852 return IO_APOLL_ABORTED;
Jens Axboe807abcb2020-07-17 17:09:27 -06005853 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005854 req->apoll = apoll;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005855 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005856 ipt.pt._qproc = io_async_queue_proc;
5857
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005858 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
Hao Xu41a51692021-08-12 15:47:02 +08005859 if (ret || ipt.error)
5860 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5861
Olivier Langlois236daeae2021-05-31 02:36:37 -04005862 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5863 mask, apoll->poll.events);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005864 return IO_APOLL_OK;
Jens Axboed7718a92020-02-14 22:23:12 -07005865}
5866
Jens Axboe76e1b642020-09-26 15:05:03 -06005867/*
5868 * Returns true if we found and killed one or more poll requests
5869 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005870static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005871 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005872{
Jens Axboe78076bb2019-12-04 19:56:40 -07005873 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005874 struct io_kiocb *req;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005875 bool found = false;
5876 int i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005877
Jens Axboe79ebeae2021-08-10 15:18:27 -06005878 spin_lock(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005879 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5880 struct hlist_head *list;
5881
5882 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005883 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005884 if (io_match_task_safe(req, tsk, cancel_all)) {
Jens Axboe7524ec52022-08-29 14:30:20 +01005885 hlist_del_init(&req->hash_node);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005886 io_poll_cancel_req(req);
5887 found = true;
5888 }
Jens Axboef3606e32020-09-22 08:18:24 -06005889 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005890 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005891 spin_unlock(&ctx->completion_lock);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005892 return found;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005893}
5894
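/*
 * Find a pending poll request by user_data in the cancel hash; with
 * @poll_only, requests armed through internal async poll are skipped.
 */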
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005895static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5896 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005897 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005898{
Jens Axboe78076bb2019-12-04 19:56:40 -07005899 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005900 struct io_kiocb *req;
5901
Jens Axboe78076bb2019-12-04 19:56:40 -07005902 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5903 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005904 if (sqe_addr != req->user_data)
5905 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005906 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5907 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005908 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005909 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005910 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07005911}
5912
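/*
 * Grab ownership and unhook a poll request without completing it;
 * returns false if someone else (a wakeup or another canceller) already
 * owns the request.
 */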
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005913static bool io_poll_disarm(struct io_kiocb *req)
5914 __must_hold(&ctx->completion_lock)
5915{
5916 if (!io_poll_get_ownership(req))
5917 return false;
5918 io_poll_remove_entries(req);
5919 hash_del(&req->hash_node);
5920 return true;
5921}
5922
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005923static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5924 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005925 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005926{
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005927 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005928
Jens Axboeb2cb8052021-03-17 08:17:19 -06005929 if (!req)
5930 return -ENOENT;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005931 io_poll_cancel_req(req);
5932 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005933}
5934
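/*
 * Translate sqe->poll32_events into an internal mask: fix up endianness,
 * default to oneshot unless IORING_POLL_ADD_MULTI is set, and keep
 * EPOLLEXCLUSIVE/EPOLLONESHOT across demangle_poll(). E.g. POLLIN
 * without IORING_POLL_ADD_MULTI becomes EPOLLIN | EPOLLONESHOT.
 */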
Pavel Begunkov9096af32021-04-14 13:38:36 +01005935static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5936 unsigned int flags)
5937{
5938 u32 events;
5939
5940 events = READ_ONCE(sqe->poll32_events);
5941#ifdef __BIG_ENDIAN
5942 events = swahw32(events);
5943#endif
5944 if (!(flags & IORING_POLL_ADD_MULTI))
5945 events |= EPOLLONESHOT;
5946 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5947}
5948
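/*
 * Prep for IORING_OP_POLL_REMOVE: sqe->addr holds the target's
 * user_data, sqe->len the IORING_POLL_UPDATE_* flags, and sqe->off an
 * optional replacement user_data.
 */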
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005949static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005950 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005951{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005952 struct io_poll_update *upd = &req->poll_update;
5953 u32 flags;
5954
Jens Axboe221c5eb2019-01-17 09:41:58 -07005955 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5956 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005957 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005958 return -EINVAL;
5959 flags = READ_ONCE(sqe->len);
5960 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5961 IORING_POLL_ADD_MULTI))
5962 return -EINVAL;
5963 /* meaningless without update */
5964 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005965 return -EINVAL;
5966
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005967 upd->old_user_data = READ_ONCE(sqe->addr);
5968 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5969 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005970
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005971 upd->new_user_data = READ_ONCE(sqe->off);
5972 if (!upd->update_user_data && upd->new_user_data)
5973 return -EINVAL;
5974 if (upd->update_events)
5975 upd->events = io_poll_parse_events(sqe, flags);
5976 else if (sqe->poll32_events)
5977 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005978
Jens Axboe221c5eb2019-01-17 09:41:58 -07005979 return 0;
5980}
5981
Jens Axboe3529d8c2019-12-19 18:24:38 -07005982static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005983{
5984 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005985 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005986
5987 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5988 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005989 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005990 return -EINVAL;
5991 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005992 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005993 return -EINVAL;
5994
Pavel Begunkov48dcd382021-08-15 10:40:18 +01005995 io_req_set_refcount(req);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005996 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005997 return 0;
5998}
5999
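/*
 * Issue IORING_OP_POLL_ADD. Errors complete the request here; otherwise
 * completion comes from the wakeup/task-work path. Userspace typically
 * arms this via liburing (illustrative, assuming the usual sqe setup):
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 */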
Pavel Begunkov61e98202021-02-10 00:03:08 +00006000static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07006001{
6002 struct io_poll_iocb *poll = &req->poll;
Jens Axboe0969e782019-12-17 18:40:57 -07006003 struct io_poll_table ipt;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006004 int ret;
Jens Axboe0969e782019-12-17 18:40:57 -07006005
Jens Axboed7718a92020-02-14 22:23:12 -07006006 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06006007
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006008 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
Pavel Begunkov6c7259c2022-08-29 14:30:22 +01006009 if (!ret && ipt.error)
6010 req_set_fail(req);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006011 ret = ret ?: ipt.error;
6012 if (ret)
6013 __io_req_complete(req, issue_flags, ret, 0);
6014 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07006015}
6016
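/*
 * Issue IORING_OP_POLL_REMOVE/update: disarm the target, then either
 * cancel it with -ECANCELED or patch events/user_data and re-arm it by
 * re-issuing io_poll_add() on the old request.
 */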
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006017static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06006018{
6019 struct io_ring_ctx *ctx = req->ctx;
6020 struct io_kiocb *preq;
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006021 int ret2, ret = 0;
Jens Axboeb69de282021-03-17 08:37:41 -06006022
Jens Axboe79ebeae2021-08-10 15:18:27 -06006023 spin_lock(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01006024 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006025 if (!preq || !io_poll_disarm(preq)) {
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006026 spin_unlock(&ctx->completion_lock);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006027 ret = preq ? -EALREADY : -ENOENT;
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006028 goto out;
Jens Axboeb69de282021-03-17 08:37:41 -06006029 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06006030 spin_unlock(&ctx->completion_lock);
Jens Axboecb3b200e2021-04-06 09:49:31 -06006031
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006032 if (req->poll_update.update_events || req->poll_update.update_user_data) {
6033		/* only update the low event-mask bits, keep the behavior flags */
6034 if (req->poll_update.update_events) {
6035 preq->poll.events &= ~0xffff;
6036 preq->poll.events |= req->poll_update.events & 0xffff;
6037 preq->poll.events |= IO_POLL_UNMASK;
6038 }
6039 if (req->poll_update.update_user_data)
6040 preq->user_data = req->poll_update.new_user_data;
6041
6042 ret2 = io_poll_add(preq, issue_flags);
6043 /* successfully updated, don't complete poll request */
6044 if (!ret2)
6045 goto out;
6046 }
6047 req_set_fail(preq);
6048 io_req_complete(preq, -ECANCELED);
6049out:
6050 if (ret < 0)
6051 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06006052 /* complete update request, we're done with it */
6053 io_req_complete(req, ret);
Jens Axboeb69de282021-03-17 08:37:41 -06006054 return 0;
6055}
6056
Pavel Begunkovf237c302021-08-18 12:42:46 +01006057static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89850fc2021-08-10 15:11:51 -06006058{
Jens Axboe89850fc2021-08-10 15:11:51 -06006059 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006060 io_req_complete_post(req, -ETIME, 0);
Jens Axboe89850fc2021-08-10 15:11:51 -06006061}
6062
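/*
 * hrtimer callback for a regular timeout; runs in hard-irq context, so
 * it only unlinks the request and defers completion to task work.
 */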
Jens Axboe5262f562019-09-17 12:26:57 -06006063static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
6064{
Jens Axboead8a48a2019-11-15 08:49:11 -07006065 struct io_timeout_data *data = container_of(timer,
6066 struct io_timeout_data, timer);
6067 struct io_kiocb *req = data->req;
6068 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06006069 unsigned long flags;
6070
Jens Axboe89850fc2021-08-10 15:11:51 -06006071 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01006072 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03006073 atomic_set(&req->ctx->cq_timeouts,
6074 atomic_read(&req->ctx->cq_timeouts) + 1);
Jens Axboe89850fc2021-08-10 15:11:51 -06006075 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03006076
Jens Axboe89850fc2021-08-10 15:11:51 -06006077 req->io_task_work.func = io_req_task_timeout;
6078 io_req_task_work_add(req);
Jens Axboe5262f562019-09-17 12:26:57 -06006079 return HRTIMER_NORESTART;
6080}
6081
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006082static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
6083 __u64 user_data)
Jens Axboe89850fc2021-08-10 15:11:51 -06006084 __must_hold(&ctx->timeout_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07006085{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006086 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06006087 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006088 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06006089
6090 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006091 found = user_data == req->user_data;
6092 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06006093 break;
Jens Axboef254ac02020-08-12 17:33:30 -06006094 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006095 if (!found)
6096 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06006097
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006098 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006099 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006100 return ERR_PTR(-EALREADY);
6101 list_del_init(&req->timeout.list);
6102 return req;
6103}
6104
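/*
 * Cancel a timeout by user_data: pull it off the timeout list and
 * complete it with -ECANCELED; needs both locks, see the annotations.
 */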
6105static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006106 __must_hold(&ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06006107 __must_hold(&ctx->timeout_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006108{
6109 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6110
6111 if (IS_ERR(req))
6112 return PTR_ERR(req);
6113
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006114 req_set_fail(req);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01006115 io_fill_cqe_req(req, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01006116 io_put_req_deferred(req);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006117 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06006118}
6119
Jens Axboe50c1df22021-08-27 17:11:06 -06006120static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
6121{
6122 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
6123 case IORING_TIMEOUT_BOOTTIME:
6124 return CLOCK_BOOTTIME;
6125 case IORING_TIMEOUT_REALTIME:
6126 return CLOCK_REALTIME;
6127 default:
6128 /* can't happen, vetted at prep time */
6129 WARN_ON_ONCE(1);
6130 fallthrough;
6131 case 0:
6132 return CLOCK_MONOTONIC;
6133 }
6134}
6135
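/*
 * Update a linked timeout in place: find it on ->ltimeout_list, stop
 * the running hrtimer and restart it with the new timespec and mode.
 */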
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006136static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6137 struct timespec64 *ts, enum hrtimer_mode mode)
6138 __must_hold(&ctx->timeout_lock)
6139{
6140 struct io_timeout_data *io;
6141 struct io_kiocb *req;
6142 bool found = false;
6143
6144 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
6145 found = user_data == req->user_data;
6146 if (found)
6147 break;
6148 }
6149 if (!found)
6150 return -ENOENT;
6151
6152 io = req->async_data;
6153 if (hrtimer_try_to_cancel(&io->timer) == -1)
6154 return -EALREADY;
6155 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
6156 io->timer.function = io_link_timeout_fn;
6157 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
6158 return 0;
6159}
6160
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006161static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6162 struct timespec64 *ts, enum hrtimer_mode mode)
Jens Axboe89850fc2021-08-10 15:11:51 -06006163 __must_hold(&ctx->timeout_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006164{
6165 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6166 struct io_timeout_data *data;
6167
6168 if (IS_ERR(req))
6169 return PTR_ERR(req);
6170
6171 req->timeout.off = 0; /* noseq */
6172 data = req->async_data;
6173 list_add_tail(&req->timeout.list, &ctx->timeout_list);
Jens Axboe50c1df22021-08-27 17:11:06 -06006174 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006175 data->timer.function = io_timeout_fn;
6176 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6177 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07006178}
6179
Jens Axboe3529d8c2019-12-19 18:24:38 -07006180static int io_timeout_remove_prep(struct io_kiocb *req,
6181 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07006182{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006183 struct io_timeout_rem *tr = &req->timeout_rem;
6184
Jens Axboeb29472e2019-12-17 18:50:29 -07006185 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6186 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006187 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6188 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006189 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
Jens Axboeb29472e2019-12-17 18:50:29 -07006190 return -EINVAL;
6191
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006192 tr->ltimeout = false;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006193 tr->addr = READ_ONCE(sqe->addr);
6194 tr->flags = READ_ONCE(sqe->timeout_flags);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006195 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6196 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6197 return -EINVAL;
6198 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6199 tr->ltimeout = true;
6200 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006201 return -EINVAL;
6202 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6203 return -EFAULT;
6204 } else if (tr->flags) {
6205 /* timeout removal doesn't support flags */
6206 return -EINVAL;
6207 }
6208
Jens Axboeb29472e2019-12-17 18:50:29 -07006209 return 0;
6210}
6211
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006212static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6213{
6214 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6215 : HRTIMER_MODE_REL;
6216}
6217
Jens Axboe11365042019-10-16 09:08:32 -06006218/*
6219 * Remove or update an existing timeout command
6220 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00006221static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06006222{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006223 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06006224 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006225 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06006226
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006227 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6228 spin_lock(&ctx->completion_lock);
6229 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006230 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006231 spin_unlock_irq(&ctx->timeout_lock);
6232 spin_unlock(&ctx->completion_lock);
6233 } else {
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006234 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6235
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006236 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006237 if (tr->ltimeout)
6238 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6239 else
6240 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006241 spin_unlock_irq(&ctx->timeout_lock);
6242 }
Jens Axboe11365042019-10-16 09:08:32 -06006243
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006244 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006245 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006246 io_req_complete_post(req, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06006247 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06006248}
6249
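/*
 * Prep for timeout SQEs, shared by plain and linked timeouts: validate
 * the flags, record the clock and abs/rel mode, copy in the timespec,
 * and for a linked timeout mark the previous request with
 * REQ_F_ARM_LTIMEOUT.
 */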
Jens Axboe3529d8c2019-12-19 18:24:38 -07006250static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07006251 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06006252{
Jens Axboead8a48a2019-11-15 08:49:11 -07006253 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06006254 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006255 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06006256
Jens Axboead8a48a2019-11-15 08:49:11 -07006257 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06006258 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006259 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6260 sqe->splice_fd_in)
Jens Axboea41525a2019-10-15 16:48:15 -06006261 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006262 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07006263 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06006264 flags = READ_ONCE(sqe->timeout_flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006265 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
6266 return -EINVAL;
6267 /* more than one clock specified is invalid, obviously */
6268 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
Jens Axboe5262f562019-09-17 12:26:57 -06006269 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06006270
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006271 INIT_LIST_HEAD(&req->timeout.list);
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006272 req->timeout.off = off;
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01006273 if (unlikely(off && !req->ctx->off_timeout_used))
6274 req->ctx->off_timeout_used = true;
Jens Axboe26a61672019-12-20 09:02:01 -07006275
Jens Axboee8c2bc12020-08-15 18:44:09 -07006276 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07006277 return -ENOMEM;
6278
Jens Axboee8c2bc12020-08-15 18:44:09 -07006279 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006280 data->req = req;
Jens Axboe50c1df22021-08-27 17:11:06 -06006281 data->flags = flags;
Jens Axboead8a48a2019-11-15 08:49:11 -07006282
6283 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06006284 return -EFAULT;
6285
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006287 data->mode = io_translate_timeout_mode(flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006288 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006289
6290 if (is_timeout_link) {
6291 struct io_submit_link *link = &req->ctx->submit_state.link;
6292
6293 if (!link->head)
6294 return -EINVAL;
6295 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6296 return -EINVAL;
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01006297 req->timeout.head = link->last;
6298 link->last->flags |= REQ_F_ARM_LTIMEOUT;
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006299 }
Jens Axboead8a48a2019-11-15 08:49:11 -07006300 return 0;
6301}
6302
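/*
 * Issue a timeout: compute the CQ sequence after which it should fire,
 * insertion-sort it into ->timeout_list so the first entry is always
 * the next one due, then start the hrtimer. Userspace typically arms
 * this via liburing (illustrative):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 */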
Pavel Begunkov61e98202021-02-10 00:03:08 +00006303static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07006304{
Jens Axboead8a48a2019-11-15 08:49:11 -07006305 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006306 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006307 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006308 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07006309
Jens Axboe89850fc2021-08-10 15:11:51 -06006310 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07006311
Jens Axboe5262f562019-09-17 12:26:57 -06006312 /*
6313	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07006314 * timeout event to be satisfied. If it isn't set, then this is
6315 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06006316 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006317 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07006318 entry = ctx->timeout_list.prev;
6319 goto add;
6320 }
Jens Axboe5262f562019-09-17 12:26:57 -06006321
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006322 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6323 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06006324
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05006325 /* Update the last seq here in case io_flush_timeouts() hasn't.
6326	 * This is safe because ->timeout_lock is held, and submissions
6327	 * and completions are never mixed in the same ->timeout_lock section.
6328 */
6329 ctx->cq_last_tm_flush = tail;
6330
Jens Axboe5262f562019-09-17 12:26:57 -06006331 /*
6332 * Insertion sort, ensuring the first entry in the list is always
6333 * the one we need first.
6334 */
Jens Axboe5262f562019-09-17 12:26:57 -06006335 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006336 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6337 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06006338
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006339 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07006340 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006341 /* nxt.seq is behind @tail, otherwise would've been completed */
6342 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06006343 break;
6344 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07006345add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006346 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07006347 data->timer.function = io_timeout_fn;
6348 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe89850fc2021-08-10 15:11:51 -06006349 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06006350 return 0;
6351}
6352
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006353struct io_cancel_data {
6354 struct io_ring_ctx *ctx;
6355 u64 user_data;
6356};
6357
Jens Axboe62755e32019-10-28 21:49:21 -06006358static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06006359{
Jens Axboe62755e32019-10-28 21:49:21 -06006360 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006361 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06006362
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006363 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06006364}
6365
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006366static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
6367 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06006368{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006369 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06006370 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06006371 int ret = 0;
6372
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006373 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07006374 return -ENOENT;
6375
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006376 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06006377 switch (cancel_ret) {
6378 case IO_WQ_CANCEL_OK:
6379 ret = 0;
6380 break;
6381 case IO_WQ_CANCEL_RUNNING:
6382 ret = -EALREADY;
6383 break;
6384 case IO_WQ_CANCEL_NOTFOUND:
6385 ret = -ENOENT;
6386 break;
6387 }
6388
Jens Axboee977d6d2019-11-05 12:39:45 -07006389 return ret;
6390}
6391
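/*
 * Best-effort cancellation by user_data: try the task's io-wq first,
 * then pending timeouts, then pending poll requests; -ENOENT means the
 * request was not found anywhere.
 */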
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006392static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
Jens Axboe47f46762019-11-09 17:43:02 -07006393{
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006394 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006395 int ret;
6396
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006397 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006398
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006399 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01006400 if (ret != -ENOENT)
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006401 return ret;
Pavel Begunkov505657b2021-08-17 20:28:09 +01006402
6403 spin_lock(&ctx->completion_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006404 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006405 ret = io_timeout_cancel(ctx, sqe_addr);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006406 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006407 if (ret != -ENOENT)
Pavel Begunkov505657b2021-08-17 20:28:09 +01006408 goto out;
6409 ret = io_poll_cancel(ctx, sqe_addr, false);
6410out:
6411 spin_unlock(&ctx->completion_lock);
6412 return ret;
Jens Axboe47f46762019-11-09 17:43:02 -07006413}
6414
Jens Axboe3529d8c2019-12-19 18:24:38 -07006415static int io_async_cancel_prep(struct io_kiocb *req,
6416 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07006417{
Jens Axboefbf23842019-12-17 18:45:56 -07006418 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07006419 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006420 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6421 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006422 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6423 sqe->splice_fd_in)
Jens Axboee977d6d2019-11-05 12:39:45 -07006424 return -EINVAL;
6425
Jens Axboefbf23842019-12-17 18:45:56 -07006426 req->cancel.addr = READ_ONCE(sqe->addr);
6427 return 0;
6428}
6429
Pavel Begunkov61e98202021-02-10 00:03:08 +00006430static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07006431{
6432 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006433 u64 sqe_addr = req->cancel.addr;
6434 struct io_tctx_node *node;
6435 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07006436
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006437 ret = io_try_cancel_userdata(req, sqe_addr);
Pavel Begunkov58f99372021-03-12 16:25:55 +00006438 if (ret != -ENOENT)
6439 goto done;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006440
6441	/* slow path, try all io-wqs */
6442 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
6443 ret = -ENOENT;
6444 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6445 struct io_uring_task *tctx = node->task->io_uring;
6446
Pavel Begunkov58f99372021-03-12 16:25:55 +00006447 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6448 if (ret != -ENOENT)
6449 break;
6450 }
6451 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkov58f99372021-03-12 16:25:55 +00006452done:
Pavel Begunkov58f99372021-03-12 16:25:55 +00006453 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006454 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006455 io_req_complete_post(req, ret, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06006456 return 0;
6457}
6458
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006459static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07006460 const struct io_uring_sqe *sqe)
6461{
Daniele Albano61710e42020-07-18 14:15:16 -06006462 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6463 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006464 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006465 return -EINVAL;
6466
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006467 req->rsrc_update.offset = READ_ONCE(sqe->off);
6468 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6469 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006470 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006471 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006472 return 0;
6473}
6474
Pavel Begunkov889fca72021-02-10 00:03:09 +00006475static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006476{
6477 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006478 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006479 int ret;
6480
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006481 up.offset = req->rsrc_update.offset;
6482 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006483 up.nr = 0;
6484 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01006485 up.resv = 0;
Dylan Yudaken7a7c9f92022-04-12 09:30:40 -07006486 up.resv2 = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006487
Jens Axboecdb31c22021-09-24 08:43:54 -06006488 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkovfdecb662021-04-25 14:32:20 +01006489 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01006490 &up, req->rsrc_update.nr_args);
Jens Axboecdb31c22021-09-24 08:43:54 -06006491 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Jens Axboe05f3fb32019-12-09 11:22:50 -07006492
6493 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006494 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00006495 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006496 return 0;
6497}
6498
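/*
 * Per-opcode prep dispatch, called at submission time while the SQE
 * memory is still valid; copies SQE fields into the request itself.
 */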
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006499static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07006500{
Jens Axboed625c6e2019-12-17 19:53:05 -07006501 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07006502 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006503 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07006504 case IORING_OP_READV:
6505 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006506 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006507 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006508 case IORING_OP_WRITEV:
6509 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006510 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006511 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006512 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006513 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006514 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006515 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006516 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006517 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006518 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006519 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006520 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006521 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006522 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006523 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006524 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006525 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07006526 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006527 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006528 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006529 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07006530 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006531 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07006532 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006533 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006534 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006535 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006536 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006537 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07006538 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006539 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006540 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006541 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07006542 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006543 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006544 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006545 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006546 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006547 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07006548 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006549 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07006550 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006551 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07006552 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006553 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006554 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006555 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006556 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006557 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006558 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006559 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07006560 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006561 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006562 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006563 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006564 case IORING_OP_SHUTDOWN:
6565 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06006566 case IORING_OP_RENAMEAT:
6567 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06006568 case IORING_OP_UNLINKAT:
6569 return io_unlinkat_prep(req, sqe);
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006570 case IORING_OP_MKDIRAT:
6571 return io_mkdirat_prep(req, sqe);
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006572 case IORING_OP_SYMLINKAT:
6573 return io_symlinkat_prep(req, sqe);
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006574 case IORING_OP_LINKAT:
6575 return io_linkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006576 }
6577
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006578 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6579 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01006580 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006581}
6582
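/*
 * Allocate and fill ->async_data for opcodes that must preserve SQE
 * state before going async: rw iovecs, msghdrs and the connect address.
 */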
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006583static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006584{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006585 if (!io_op_defs[req->opcode].needs_async_setup)
6586 return 0;
6587 if (WARN_ON_ONCE(req->async_data))
6588 return -EFAULT;
6589 if (io_alloc_async_data(req))
6590 return -EAGAIN;
6591
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006592 switch (req->opcode) {
6593 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006594 return io_rw_prep_async(req, READ);
6595 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006596 return io_rw_prep_async(req, WRITE);
6597 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006598 return io_sendmsg_prep_async(req);
6599 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006600 return io_recvmsg_prep_async(req);
6601 case IORING_OP_CONNECT:
6602 return io_connect_prep_async(req);
6603 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006604 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6605 req->opcode);
6606 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07006607}
6608
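/*
 * Recover the submission sequence of the head of a link: cached_sq_head
 * was bumped once per linked request, so subtract one per link entry.
 */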
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006609static u32 io_get_sequence(struct io_kiocb *req)
6610{
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006611 u32 seq = req->ctx->cached_sq_head;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006612
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006613 /* need original cached_sq_head, but it was increased for each req */
6614 io_for_each_link(req, req)
6615 seq--;
6616 return seq;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006617}
6618
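/*
 * Handle IOSQE_IO_DRAIN: if older requests may still be pending, prep
 * for async execution and park the request on ->defer_list. Returns
 * true if the request was consumed (deferred, queued or failed), false
 * if the caller should issue it normally.
 */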
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006619static bool io_drain_req(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006620{
Pavel Begunkov3c199662021-06-15 16:47:57 +01006621 struct io_kiocb *pos;
Jens Axboedef596e2019-01-09 08:59:42 -07006622 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006623 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07006624 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006625 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07006626
Pavel Begunkovb8ce1b92021-08-31 14:13:11 +01006627 if (req->flags & REQ_F_FAIL) {
6628 io_req_complete_fail_submit(req);
6629 return true;
6630 }
6631
Pavel Begunkov3c199662021-06-15 16:47:57 +01006632 /*
6633 * If we need to drain a request in the middle of a link, drain the
6634 * head request and the next request/link after the current link.
6635	 * Since links execute sequentially, IOSQE_IO_DRAIN is effectively
6636	 * inherited by every request of our link.
6637 */
6638 if (ctx->drain_next) {
6639 req->flags |= REQ_F_IO_DRAIN;
6640 ctx->drain_next = false;
6641 }
6642 /* not interested in head, start from the first linked */
6643 io_for_each_link(pos, req->link) {
6644 if (pos->flags & REQ_F_IO_DRAIN) {
6645 ctx->drain_next = true;
6646 req->flags |= REQ_F_IO_DRAIN;
6647 break;
6648 }
6649 }
6650
Jens Axboedef596e2019-01-09 08:59:42 -07006651	/* Still need to defer if there are pending reqs in the defer list. */
Hao Xu1bd12b72021-11-25 17:21:02 +08006652 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006653 if (likely(list_empty_careful(&ctx->defer_list) &&
Pavel Begunkov10c66902021-06-15 16:47:56 +01006654 !(req->flags & REQ_F_IO_DRAIN))) {
Hao Xu1bd12b72021-11-25 17:21:02 +08006655 spin_unlock(&ctx->completion_lock);
Pavel Begunkov10c66902021-06-15 16:47:56 +01006656 ctx->drain_active = false;
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006657 return false;
Pavel Begunkov10c66902021-06-15 16:47:56 +01006658 }
Hao Xu1bd12b72021-11-25 17:21:02 +08006659 spin_unlock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006660
6661 seq = io_get_sequence(req);
6662 /* Still a chance to pass the sequence check */
6663 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006664 return false;
Jens Axboedef596e2019-01-09 08:59:42 -07006665
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006666 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006667 if (ret)
Pavel Begunkov1b487732021-07-11 22:41:13 +01006668 goto fail;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006669 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006670 de = kmalloc(sizeof(*de), GFP_KERNEL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006671 if (!de) {
Pavel Begunkov1b487732021-07-11 22:41:13 +01006672 ret = -ENOMEM;
6673fail:
6674 io_req_complete_failed(req, ret);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006675 return true;
6676 }
Jens Axboe31b51512019-01-18 22:56:34 -07006677
Jens Axboe79ebeae2021-08-10 15:18:27 -06006678 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006679 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06006680 spin_unlock(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006681 kfree(de);
Pavel Begunkovf237c302021-08-18 12:42:46 +01006682 io_queue_async_work(req, NULL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006683 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006684 }
6685
6686 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006687 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006688 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006689 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006690 spin_unlock(&ctx->completion_lock);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006691 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006692}
6693
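/*
 * Drop per-opcode resources on completion: selected buffers, async
 * iovecs/msghdrs, pathnames, the apoll entry, inflight tracking and
 * any request-private credentials.
 */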
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006694static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006695{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006696 if (req->flags & REQ_F_BUFFER_SELECTED) {
6697 switch (req->opcode) {
6698 case IORING_OP_READV:
6699 case IORING_OP_READ_FIXED:
6700 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006701 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006702 break;
6703 case IORING_OP_RECVMSG:
6704 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006705 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006706 break;
6707 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006708 }
6709
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006710 if (req->flags & REQ_F_NEED_CLEANUP) {
6711 switch (req->opcode) {
6712 case IORING_OP_READV:
6713 case IORING_OP_READ_FIXED:
6714 case IORING_OP_READ:
6715 case IORING_OP_WRITEV:
6716 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006717 case IORING_OP_WRITE: {
6718 struct io_async_rw *io = req->async_data;
Pavel Begunkov1dacb4d2021-06-17 18:14:03 +01006719
6720 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006721 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006722 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006723 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006724 case IORING_OP_SENDMSG: {
6725 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006726
6727 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006728 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006729 }
Jens Axboef3cd48502020-09-24 14:55:54 -06006730 case IORING_OP_OPENAT:
6731 case IORING_OP_OPENAT2:
6732 if (req->open.filename)
6733 putname(req->open.filename);
6734 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006735 case IORING_OP_RENAMEAT:
6736 putname(req->rename.oldpath);
6737 putname(req->rename.newpath);
6738 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006739 case IORING_OP_UNLINKAT:
6740 putname(req->unlink.filename);
6741 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006742 case IORING_OP_MKDIRAT:
6743 putname(req->mkdir.filename);
6744 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006745 case IORING_OP_SYMLINKAT:
6746 putname(req->symlink.oldpath);
6747 putname(req->symlink.newpath);
6748 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006749 case IORING_OP_LINKAT:
6750 putname(req->hardlink.oldpath);
6751 putname(req->hardlink.newpath);
6752 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006753 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006754 }
Jens Axboe75652a302021-04-15 09:52:40 -06006755 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6756 kfree(req->apoll->double_poll);
6757 kfree(req->apoll);
6758 req->apoll = NULL;
6759 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006760 if (req->flags & REQ_F_INFLIGHT) {
6761 struct io_uring_task *tctx = req->task->io_uring;
6762
6763 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006764 }
Pavel Begunkovc8543572021-06-17 18:14:04 +01006765 if (req->flags & REQ_F_CREDS)
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006766 put_cred(req->creds);
Pavel Begunkovc8543572021-06-17 18:14:04 +01006767
6768 req->flags &= ~IO_REQ_CLEAN_FLAGS;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006769}
6770
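/*
 * Central issue switch: temporarily override credentials if the request
 * carries its own, dispatch by opcode, and register the request for
 * IOPOLL reaping when the ring runs in IORING_SETUP_IOPOLL mode.
 */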
Pavel Begunkov889fca72021-02-10 00:03:09 +00006771static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006772{
Jens Axboeedafcce2019-01-09 09:16:05 -07006773 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006774 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006775 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006776
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006777 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006778 creds = override_creds(req->creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006779
Jens Axboed625c6e2019-12-17 19:53:05 -07006780 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006781 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006782 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006783 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006784 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006785 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006786 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006787 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006788 break;
6789 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006790 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006791 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006792 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006793 break;
6794 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006795 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006796 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006797 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006798 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006799 break;
6800 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006801 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006802 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006803 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006804 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006805 break;
6806 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006807 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006808 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006809 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006810 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006811 break;
6812 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006813 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006814 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006815 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006816 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006817 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006818 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006819 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006820 break;
6821 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006822 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006823 break;
6824 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006825 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006826 break;
6827 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006828 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006829 break;
6830 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006831 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006832 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006833 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006834 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006835 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006836 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006837 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006838 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006839 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006840 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006841 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006842 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006843 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006844 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006845 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006846 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006847 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006848 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006849 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006850 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006851 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006852 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006853 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006854 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006855 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006856 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006857 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006858 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006859 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006860 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006861 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006862 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006863 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006864 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006865 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006866 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006867 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006868 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006869 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006870 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006871 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006872 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006873 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006874 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006875 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006876 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006877 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006878 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006879 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006880 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006881 case IORING_OP_MKDIRAT:
6882 ret = io_mkdirat(req, issue_flags);
6883 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006884 case IORING_OP_SYMLINKAT:
6885 ret = io_symlinkat(req, issue_flags);
6886 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006887 case IORING_OP_LINKAT:
6888 ret = io_linkat(req, issue_flags);
6889 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006890 default:
6891 ret = -EINVAL;
6892 break;
6893 }
Jens Axboe31b51512019-01-18 22:56:34 -07006894
Jens Axboe5730b272021-02-27 15:57:30 -07006895 if (creds)
6896 revert_creds(creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006897 if (ret)
6898 return ret;
Jens Axboeb5325762020-05-19 21:20:27 -06006899 /* If the op doesn't have a file, we're not polling for it */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01006900 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6901 io_iopoll_req_issued(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006902
6903 return 0;
6904}
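/*
 * Note on the dispatch above: each opcode handler returns 0 once the
 * request has completed or has been queued for async completion, and a
 * negative errno on failure; issue_flags tells the handler whether it
 * may block. On IOPOLL rings, any request that carries a file is added
 * to the iopoll list here so io_do_iopoll() can reap its completion.
 */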
6905
Pavel Begunkovebc11b62021-08-09 13:04:05 +01006906static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6907{
6908 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6909
6910 req = io_put_req_find_next(req);
6911 return req ? &req->work : NULL;
6912}
6913
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006914static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006915{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006916 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006917 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006918 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006919
Pavel Begunkov48dcd382021-08-15 10:40:18 +01006920 /* one will be dropped by ->io_free_work() after returning to io-wq */
6921 if (!(req->flags & REQ_F_REFCOUNT))
6922 __io_req_set_refcount(req, 2);
6923 else
6924 req_ref_get(req);
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006925
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006926 timeout = io_prep_linked_timeout(req);
6927 if (timeout)
6928 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006929
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006930 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
Jens Axboe4014d942021-01-19 15:53:54 -07006931 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006932 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006933
Jens Axboe561fb042019-10-24 07:25:42 -06006934 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006935 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006936 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006937 /*
6938 * We can get EAGAIN for polled IO even though we're
6939 * forcing a sync submission from here, since we can't
6940 * wait for request slots on the block side.
6941 */
Pavel Begunkov51ebf1b2022-05-13 11:24:56 +01006942 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe561fb042019-10-24 07:25:42 -06006943 break;
6944 cond_resched();
6945 } while (1);
6946 }
Jens Axboe31b51512019-01-18 22:56:34 -07006947
Pavel Begunkova3df76982021-02-18 22:32:52 +00006948 /* avoid locking problems by failing it from a clean context */
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01006949 if (ret)
Pavel Begunkova3df76982021-02-18 22:32:52 +00006950 io_req_task_queue_fail(req, ret);
Jens Axboe31b51512019-01-18 22:56:34 -07006951}
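/*
 * Refcounting note for the worker path above: the request holds two
 * references while owned by io-wq -- one is dropped by ->io_free_work()
 * when the item is returned, the other belongs to the request's normal
 * lifetime. The do/while loop spins on -EAGAIN (with cond_resched())
 * only for IOPOLL rings, because a worker cannot wait for block-side
 * request slots there; any final error is failed from task context.
 */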
Jens Axboe2b188cc2019-01-07 10:46:33 -07006952
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006953static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006954 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006955{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01006956 return &table->files[i];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006957}
6958
Jens Axboe09bb8392019-03-13 12:39:28 -06006959static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6960 int index)
6961{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006962 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006963
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006964 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006965}
6966
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006967static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006968{
6969 unsigned long file_ptr = (unsigned long) file;
6970
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006971 if (__io_file_supports_nowait(file, READ))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006972 file_ptr |= FFS_ASYNC_READ;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006973 if (__io_file_supports_nowait(file, WRITE))
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006974 file_ptr |= FFS_ASYNC_WRITE;
6975 if (S_ISREG(file_inode(file)->i_mode))
6976 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006977 file_slot->file_ptr = file_ptr;
Jens Axboe09bb8392019-03-13 12:39:28 -06006978}
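/*
 * The fixed-file table packs per-file flags into the low bits of the
 * struct file pointer itself (free thanks to pointer alignment), which
 * is why lookups mask with FFS_MASK. In sketch form:
 *
 *	file  = (struct file *)(slot->file_ptr & FFS_MASK);
 *	flags = slot->file_ptr & ~FFS_MASK;
 *
 * where the flag bits are FFS_ASYNC_READ, FFS_ASYNC_WRITE and FFS_ISREG.
 */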
6979
Pavel Begunkovac177052021-08-09 13:04:02 +01006980static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
6981 struct io_kiocb *req, int fd)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006982{
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006983 struct file *file;
Pavel Begunkovac177052021-08-09 13:04:02 +01006984 unsigned long file_ptr;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006985
Pavel Begunkovac177052021-08-09 13:04:02 +01006986 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6987 return NULL;
6988 fd = array_index_nospec(fd, ctx->nr_user_files);
6989 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
6990 file = (struct file *) (file_ptr & FFS_MASK);
6991 file_ptr &= ~FFS_MASK;
6992 /* mask in overlapping REQ_F and FFS bits */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01006993 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
Pavel Begunkovac177052021-08-09 13:04:02 +01006994 io_req_set_rsrc_node(req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006995 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006996}
6997
Pavel Begunkovac177052021-08-09 13:04:02 +01006998static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01006999 struct io_kiocb *req, int fd)
7000{
Pavel Begunkov62906e82021-08-10 14:52:47 +01007001 struct file *file = fget(fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01007002
7003 trace_io_uring_file_get(ctx, fd);
7004
7005 /* we don't allow fixed io_uring files */
7006 if (file && unlikely(file->f_op == &io_uring_fops))
7007 io_req_track_inflight(req);
7008 return file;
7009}
7010
7011static inline struct file *io_file_get(struct io_ring_ctx *ctx,
Pavel Begunkovac177052021-08-09 13:04:02 +01007012 struct io_kiocb *req, int fd, bool fixed)
7013{
7014 if (fixed)
7015 return io_file_get_fixed(ctx, req, fd);
7016 else
Pavel Begunkov62906e82021-08-10 14:52:47 +01007017 return io_file_get_normal(ctx, req, fd);
Pavel Begunkovac177052021-08-09 13:04:02 +01007018}
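/*
 * Per-SQE selection between the two paths above, as userspace would set
 * it up (minimal sketch; struct io_uring_sqe field names):
 *
 *	sqe->fd = 2;				index into the registered table
 *	sqe->flags |= IOSQE_FIXED_FILE;		routed to io_file_get_fixed()
 *
 * Without IOSQE_FIXED_FILE, sqe->fd is an ordinary descriptor resolved
 * via fget() in io_file_get_normal().
 */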
7019
Pavel Begunkovf237c302021-08-18 12:42:46 +01007020static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89b263f2021-08-10 15:14:18 -06007021{
7022 struct io_kiocb *prev = req->timeout.prev;
Pavel Begunkov3d2a1e62021-11-26 14:38:14 +00007023 int ret = -ENOENT;
Jens Axboe89b263f2021-08-10 15:14:18 -06007024
7025 if (prev) {
Pavel Begunkov3d2a1e62021-11-26 14:38:14 +00007026 if (!(req->task->flags & PF_EXITING))
7027 ret = io_try_cancel_userdata(req, prev->user_data);
Pavel Begunkov505657b2021-08-17 20:28:09 +01007028 io_req_complete_post(req, ret ?: -ETIME, 0);
Jens Axboe89b263f2021-08-10 15:14:18 -06007029 io_put_req(prev);
Jens Axboe89b263f2021-08-10 15:14:18 -06007030 } else {
7031 io_req_complete_post(req, -ETIME, 0);
7032 }
7033}
7034
Jens Axboe2665abf2019-11-05 12:40:47 -07007035static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
7036{
Jens Axboead8a48a2019-11-15 08:49:11 -07007037 struct io_timeout_data *data = container_of(timer,
7038 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00007039 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07007040 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07007041 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07007042
Jens Axboe89b263f2021-08-10 15:14:18 -06007043 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00007044 prev = req->timeout.head;
7045 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07007046
7047 /*
 7048	 * We don't expect the list to be empty; that will only happen if we
7049 * race with the completion of the linked work.
7050 */
Pavel Begunkov447c19f2021-05-14 12:02:50 +01007051 if (prev) {
Pavel Begunkovf2f87372020-10-27 23:25:37 +00007052 io_remove_next_linked(prev);
Pavel Begunkov447c19f2021-05-14 12:02:50 +01007053 if (!req_ref_inc_not_zero(prev))
7054 prev = NULL;
7055 }
Pavel Begunkovef9dd632021-08-28 19:54:38 -06007056 list_del(&req->timeout.list);
Jens Axboe89b263f2021-08-10 15:14:18 -06007057 req->timeout.prev = prev;
7058 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Jens Axboe2665abf2019-11-05 12:40:47 -07007059
Jens Axboe89b263f2021-08-10 15:14:18 -06007060 req->io_task_work.func = io_req_task_link_timeout;
7061 io_req_task_work_add(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07007062 return HRTIMER_NORESTART;
7063}
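/*
 * io_link_timeout_fn() runs from the hrtimer callback, not task context,
 * so under timeout_lock it only detaches the linked request and takes a
 * reference if that request is still alive; the actual cancellation and
 * completion are punted to task context via io_req_task_link_timeout(),
 * queued through io_req_task_work_add() above.
 */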
7064
Pavel Begunkovde968c12021-03-19 17:22:33 +00007065static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07007066{
Pavel Begunkovde968c12021-03-19 17:22:33 +00007067 struct io_ring_ctx *ctx = req->ctx;
7068
Jens Axboe89b263f2021-08-10 15:14:18 -06007069 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07007070 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00007071 * If the back reference is NULL, then our linked request finished
 7072	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07007073 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00007074 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07007075 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07007076
Jens Axboead8a48a2019-11-15 08:49:11 -07007077 data->timer.function = io_link_timeout_fn;
7078 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
7079 data->mode);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06007080 list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
Jens Axboe2665abf2019-11-05 12:40:47 -07007081 }
Jens Axboe89b263f2021-08-10 15:14:18 -06007082 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07007083 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07007084 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07007085}
7086
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00007087static void __io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007088 __must_hold(&req->ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007089{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01007090 struct io_kiocb *linked_timeout;
Jens Axboee0c5c572019-03-12 10:18:47 -06007091 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007092
Olivier Langlois59b735a2021-06-22 05:17:39 -07007093issue_sqe:
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00007094 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06007095
7096 /*
7097 * We async punt it if the file wasn't marked NOWAIT, or if the file
7098 * doesn't support non-blocking read/write attempts
7099 */
Pavel Begunkov18400382021-03-19 17:22:34 +00007100 if (likely(!ret)) {
Pavel Begunkove342c802021-01-19 13:32:47 +00007101 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00007102 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01007103 struct io_submit_state *state = &ctx->submit_state;
Jens Axboee65ef562019-03-12 10:16:44 -06007104
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01007105 state->compl_reqs[state->compl_nr++] = req;
7106 if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01007107 io_submit_flush_completions(ctx);
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01007108 return;
Pavel Begunkov0d63c142020-10-22 16:47:18 +01007109 }
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01007110
7111 linked_timeout = io_prep_linked_timeout(req);
7112 if (linked_timeout)
7113 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov18400382021-03-19 17:22:34 +00007114 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01007115 linked_timeout = io_prep_linked_timeout(req);
7116
Olivier Langlois59b735a2021-06-22 05:17:39 -07007117 switch (io_arm_poll_handler(req)) {
7118 case IO_APOLL_READY:
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01007119 if (linked_timeout)
Pavel Begunkov4ea672a2021-10-20 09:53:02 +01007120 io_queue_linked_timeout(linked_timeout);
Olivier Langlois59b735a2021-06-22 05:17:39 -07007121 goto issue_sqe;
7122 case IO_APOLL_ABORTED:
Pavel Begunkov18400382021-03-19 17:22:34 +00007123 /*
7124 * Queued up for async execution, worker will release
7125 * submit reference when the iocb is actually submitted.
7126 */
Pavel Begunkovf237c302021-08-18 12:42:46 +01007127 io_queue_async_work(req, NULL);
Olivier Langlois59b735a2021-06-22 05:17:39 -07007128 break;
Pavel Begunkov18400382021-03-19 17:22:34 +00007129 }
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01007130
7131 if (linked_timeout)
7132 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01007133 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00007134 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06007135 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07007136}
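/*
 * Issue-path summary: a request that completed inline is batched into
 * state->compl_reqs and flushed once the batch is full; -EAGAIN on a
 * request not marked REQ_F_NOWAIT first tries poll arming, where
 * IO_APOLL_READY re-issues immediately and IO_APOLL_ABORTED punts to
 * io-wq; any other error fails the request. A linked timeout is only
 * started once the request is genuinely in flight.
 */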
7137
Pavel Begunkov441b8a72021-06-14 23:37:31 +01007138static inline void io_queue_sqe(struct io_kiocb *req)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007139 __must_hold(&req->ctx->uring_lock)
Jackie Liu4fe2c962019-09-09 20:50:40 +08007140{
Pavel Begunkov10c66902021-06-15 16:47:56 +01007141 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01007142 return;
Jackie Liu4fe2c962019-09-09 20:50:40 +08007143
Hao Xua8295b92021-08-27 17:46:09 +08007144 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00007145 __io_queue_sqe(req);
Hao Xua8295b92021-08-27 17:46:09 +08007146 } else if (req->flags & REQ_F_FAIL) {
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01007147 io_req_complete_fail_submit(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01007148 } else {
7149 int ret = io_req_prep_async(req);
7150
7151 if (unlikely(ret))
7152 io_req_complete_failed(req, ret);
7153 else
Pavel Begunkovf237c302021-08-18 12:42:46 +01007154 io_queue_async_work(req, NULL);
Jens Axboece35a472019-12-17 08:04:44 -07007155 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08007156}
7157
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007158/*
7159 * Check SQE restrictions (opcode and flags).
7160 *
7161 * Returns 'true' if SQE is allowed, 'false' otherwise.
7162 */
7163static inline bool io_check_restriction(struct io_ring_ctx *ctx,
7164 struct io_kiocb *req,
7165 unsigned int sqe_flags)
7166{
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01007167 if (likely(!ctx->restricted))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007168 return true;
7169
7170 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
7171 return false;
7172
7173 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
7174 ctx->restrictions.sqe_flags_required)
7175 return false;
7176
7177 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
7178 ctx->restrictions.sqe_flags_required))
7179 return false;
7180
7181 return true;
7182}
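/*
 * The checks above in boolean form, with A = sqe_flags_allowed,
 * R = sqe_flags_required and f = the SQE's flags: a request passes iff
 *
 *	test_bit(opcode, sqe_op) && (f & R) == R && (f & ~(A | R)) == 0
 *
 * i.e. the opcode is whitelisted, every required flag is present, and
 * no flag outside the allowed/required union is set.
 */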
7183
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007184static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007185 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007186 __must_hold(&ctx->uring_lock)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007187{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007188 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007189 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07007190 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007191
Pavel Begunkov864ea922021-08-09 13:04:08 +01007192 /* req is partially pre-initialised, see io_preinit_req() */
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007193 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007194 /* same numerical values with corresponding REQ_F_*, safe to copy */
7195 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007196 req->user_data = READ_ONCE(sqe->user_data);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007197 req->file = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007198 req->fixed_rsrc_refs = NULL;
Pavel Begunkov4dd28242020-06-15 10:33:13 +03007199 req->task = current;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007200
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007201 /* enforce forwards compatibility on users */
Pavel Begunkovdddca222021-04-27 16:13:52 +01007202 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00007203 return -EINVAL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007204 if (unlikely(req->opcode >= IORING_OP_LAST))
7205 return -EINVAL;
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01007206 if (!io_check_restriction(ctx, req, sqe_flags))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02007207 return -EACCES;
7208
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007209 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
7210 !io_op_defs[req->opcode].buffer_select)
7211 return -EOPNOTSUPP;
Pavel Begunkov3c199662021-06-15 16:47:57 +01007212 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
7213 ctx->drain_active = true;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007214
Jens Axboe003e8dc2021-03-06 09:22:27 -07007215 personality = READ_ONCE(sqe->personality);
7216 if (personality) {
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01007217 req->creds = xa_load(&ctx->personalities, personality);
7218 if (!req->creds)
Jens Axboe003e8dc2021-03-06 09:22:27 -07007219 return -EINVAL;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01007220 get_cred(req->creds);
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01007221 req->flags |= REQ_F_CREDS;
Jens Axboe003e8dc2021-03-06 09:22:27 -07007222 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007223 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03007224
Jens Axboe27926b62020-10-28 09:33:23 -06007225 /*
7226 * Plug now if we have more than 1 IO left after this, and the target
7227 * is potentially a read/write to block based storage.
7228 */
7229 if (!state->plug_started && state->ios_left > 1 &&
7230 io_op_defs[req->opcode].plug) {
7231 blk_start_plug(&state->plug);
7232 state->plug_started = true;
7233 }
Jens Axboe63ff8222020-05-07 14:56:15 -06007234
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00007235 if (io_op_defs[req->opcode].needs_file) {
Pavel Begunkov62906e82021-08-10 14:52:47 +01007236 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
Pavel Begunkovac177052021-08-09 13:04:02 +01007237 (sqe_flags & IOSQE_FIXED_FILE));
Pavel Begunkovba13e232021-02-01 18:59:52 +00007238 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00007239 ret = -EBADF;
7240 }
7241
Pavel Begunkov71b547c2020-10-10 18:34:09 +01007242 state->ios_left--;
7243 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03007244}
7245
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007246static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007247 const struct io_uring_sqe *sqe)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007248 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007249{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007250 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007251 int ret;
7252
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007253 ret = io_init_req(ctx, req, sqe);
7254 if (unlikely(ret)) {
7255fail_req:
Hao Xua8295b92021-08-27 17:46:09 +08007256 /* fail even hard links since we don't submit */
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007257 if (link->head) {
Hao Xua8295b92021-08-27 17:46:09 +08007258 /*
 7259			 * Whether a link req failed or was cancelled can be judged by
 7260			 * REQ_F_FAIL being set, but the head is an exception: it may
 7261			 * have REQ_F_FAIL set because some other req in the chain failed.
 7262			 * Use req->result to distinguish a head that failed itself from
 7263			 * one marked because of another req's failure, so that the
 7264			 * correct ret code can be set for it. Initialise result here to
 7265			 * avoid affecting the normal path.
7266 */
7267 if (!(link->head->flags & REQ_F_FAIL))
7268 req_fail_link_node(link->head, -ECANCELED);
7269 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7270 /*
 7271			 * the current req is a normal (unlinked) request; return the
 7272			 * error and break the submission loop.
7273 */
7274 io_req_complete_failed(req, ret);
7275 return ret;
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007276 }
Hao Xua8295b92021-08-27 17:46:09 +08007277 req_fail_link_node(req, ret);
7278 } else {
7279 ret = io_req_prep(req, sqe);
7280 if (unlikely(ret))
7281 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007282 }
Pavel Begunkov441b8a72021-06-14 23:37:31 +01007283
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00007284 /* don't need @sqe from now on */
Olivier Langlois236daeae2021-05-31 02:36:37 -04007285 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
7286 req->flags, true,
7287 ctx->flags & IORING_SETUP_SQPOLL);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00007288
Jens Axboe6c271ce2019-01-10 11:22:30 -07007289 /*
7290 * If we already have a head request, queue this one for async
7291 * submittal once the head completes. If we don't have a head but
7292 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7293 * submitted sync once the chain is complete. If none of those
7294 * conditions are true (normal request), then just queue it.
7295 */
7296 if (link->head) {
7297 struct io_kiocb *head = link->head;
7298
Hao Xua8295b92021-08-27 17:46:09 +08007299 if (!(req->flags & REQ_F_FAIL)) {
7300 ret = io_req_prep_async(req);
7301 if (unlikely(ret)) {
7302 req_fail_link_node(req, ret);
7303 if (!(head->flags & REQ_F_FAIL))
7304 req_fail_link_node(head, -ECANCELED);
7305 }
7306 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007307 trace_io_uring_link(ctx, req, head);
7308 link->last->link = req;
7309 link->last = req;
7310
7311 /* last request of a link, enqueue the link */
7312 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
7313 link->head = NULL;
Pavel Begunkov5e159202021-06-14 23:37:26 +01007314 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007315 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08007316 } else {
Jens Axboe2b188cc2019-01-07 10:46:33 -07007317 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08007318 link->head = req;
7319 link->last = req;
7320 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00007321 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08007322 }
7323 }
7324
7325 return 0;
7326}
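/*
 * Link building in brief: link->head/link->last track the chain being
 * assembled across io_submit_sqe() calls. A failure poisons the whole
 * chain with REQ_F_FAIL (the head gets -ECANCELED) instead of letting
 * part of it run, and the chain is only queued once an SQE arrives with
 * neither IOSQE_IO_LINK nor IOSQE_IO_HARDLINK set, i.e. the last link.
 */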
7327
7328/*
7329 * Batched submission is done, ensure local IO is flushed out.
7330 */
7331static void io_submit_state_end(struct io_submit_state *state,
7332 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03007333{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007334 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00007335 io_queue_sqe(state->link.head);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01007336 if (state->compl_nr)
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01007337 io_submit_flush_completions(ctx);
Jackie Liua197f662019-11-08 08:09:12 -07007338 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007339 blk_finish_plug(&state->plug);
Jens Axboe9e645e112019-05-10 16:07:28 -06007340}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007341
Jens Axboe9e645e112019-05-10 16:07:28 -06007342/*
7343 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03007344 */
Jens Axboe9e645e112019-05-10 16:07:28 -06007345static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03007346 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06007347{
7348 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07007349 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007350	/* set only head, no need to init link.last in advance */
7351 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07007352}
7353
Jens Axboe193155c2020-02-22 23:22:19 -07007354static void io_commit_sqring(struct io_ring_ctx *ctx)
7355{
Jens Axboe75c6a032020-01-28 10:15:23 -07007356 struct io_rings *rings = ctx->rings;
7357
7358 /*
Jens Axboe193155c2020-02-22 23:22:19 -07007359 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07007360 * since once we write the new head, the application could
7361 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03007362 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03007363 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07007364}
7365
Jens Axboe9e645e112019-05-10 16:07:28 -06007366/*
Fam Zhengdd9ae8a2021-06-04 17:42:56 +01007367 * Fetch an sqe, if one is available. Note this returns a pointer to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06007368 * that is mapped by userspace. This means that care needs to be taken to
7369 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07007370 * being a good citizen. If members of the sqe are validated and then later
7371 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03007372 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06007373 */
7374static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06007375{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01007376 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01007377 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06007378
7379 /*
7380 * The cached sq head (or cq tail) serves two purposes:
7381 *
 7382 * 1) allows us to batch the cost of updating the user-visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03007383 * head.
Jens Axboe9e645e112019-05-10 16:07:28 -06007384 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03007385 * though the application is the one updating it.
7386 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01007387 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03007388 if (likely(head < ctx->sq_entries))
7389 return &ctx->sq_sqes[head];
7390
7391 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01007392 ctx->cq_extra--;
7393 WRITE_ONCE(ctx->rings->sq_dropped,
7394 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03007395 return NULL;
7396}
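/*
 * The SQ double indirection in sketch form:
 *
 *	head = READ_ONCE(sq_array[cached_sq_head++ & (sq_entries - 1)]);
 *	sqe  = &sq_sqes[head];
 *
 * so userspace may fill SQE slots in any order and publish only indices
 * through sq_array; an out-of-range index is dropped and accounted in
 * sq_dropped.
 */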
Jens Axboeb7bb4f72019-12-15 22:13:43 -07007397
Jens Axboe0f212202020-09-13 13:09:39 -06007398static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01007399 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007400{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00007401 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007402
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03007403 /* make sure SQ entry isn't read before tail */
7404 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03007405 if (!percpu_ref_tryget_many(&ctx->refs, nr))
7406 return -EAGAIN;
Pavel Begunkov9a108672021-08-27 11:55:01 +01007407 io_get_task_refs(nr);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007408
Pavel Begunkovba88ff12021-02-10 00:03:11 +00007409 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkov46c4e162021-02-18 18:29:37 +00007410 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07007411 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03007412 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03007413
Pavel Begunkov258b29a2021-02-10 00:03:10 +00007414 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03007415 if (unlikely(!req)) {
7416 if (!submitted)
7417 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03007418 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06007419 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00007420 sqe = io_get_sqe(ctx);
7421 if (unlikely(!sqe)) {
Hao Xu0c6e1d72021-08-26 01:58:56 +08007422 list_add(&req->inflight_entry, &ctx->submit_state.free_list);
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00007423 break;
7424 }
Jens Axboed3656342019-12-18 09:50:26 -07007425 /* will complete beyond this point, count as submitted */
7426 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007427 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07007428 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007429 }
7430
Pavel Begunkov9466f432020-01-25 22:34:01 +03007431 if (unlikely(submitted != nr)) {
7432 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06007433 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03007434
Pavel Begunkov09899b12021-06-14 02:36:22 +01007435 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06007436 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03007437 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007438
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00007439 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03007440 /* Commit SQ ring head once we've consumed and submitted all SQEs */
7441 io_commit_sqring(ctx);
7442
Jens Axboe6c271ce2019-01-10 11:22:30 -07007443 return submitted;
7444}
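/*
 * Batching notes: ctx and task references for all nr requests are taken
 * up front (percpu_ref_tryget_many() and io_get_task_refs()), and any
 * that were not consumed by a short submit are returned afterwards. The
 * SQ head is published to userspace once per batch, via the
 * smp_store_release() in io_commit_sqring().
 */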
7445
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007446static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
7447{
7448 return READ_ONCE(sqd->state);
7449}
7450
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007451static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
7452{
7453 /* Tell userspace we may need a wakeup call */
Jens Axboe79ebeae2021-08-10 15:18:27 -06007454 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07007455 WRITE_ONCE(ctx->rings->sq_flags,
7456 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007457 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007458}
7459
7460static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
7461{
Jens Axboe79ebeae2021-08-10 15:18:27 -06007462 spin_lock(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07007463 WRITE_ONCE(ctx->rings->sq_flags,
7464 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
Jens Axboe79ebeae2021-08-10 15:18:27 -06007465 spin_unlock(&ctx->completion_lock);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08007466}
7467
Xiaoguang Wang08369242020-11-03 14:15:59 +08007468static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007469{
Jens Axboec8d1ba52020-09-14 11:07:26 -06007470 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08007471 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007472
Jens Axboec8d1ba52020-09-14 11:07:26 -06007473 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06007474 /* if we're handling multiple rings, cap submit size for fairness */
Olivier Langlois4ce8ad92021-06-23 11:50:18 -07007475 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
7476 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
Jens Axboee95eee22020-09-08 09:11:32 -06007477
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007478 if (!list_empty(&ctx->iopoll_list) || to_submit) {
7479 unsigned nr_events = 0;
Pavel Begunkov948e1942021-06-24 15:09:55 +01007480 const struct cred *creds = NULL;
7481
7482 if (ctx->sq_creds != current_cred())
7483 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007484
Xiaoguang Wang08369242020-11-03 14:15:59 +08007485 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007486 if (!list_empty(&ctx->iopoll_list))
Pavel Begunkova8576af2021-08-15 10:40:21 +01007487 io_do_iopoll(ctx, &nr_events, 0);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08007488
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01007489 /*
 7490		 * Don't submit if refs are dying: that matters for
 7491		 * io_uring_register(), and io_ring_exit_work() also relies on it
7492 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00007493 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
7494 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08007495 ret = io_submit_sqes(ctx, to_submit);
7496 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06007497
Pavel Begunkovacfb3812021-05-16 22:58:03 +01007498 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
7499 wake_up(&ctx->sqo_sq_wait);
Pavel Begunkov948e1942021-06-24 15:09:55 +01007500 if (creds)
7501 revert_creds(creds);
Pavel Begunkovacfb3812021-05-16 22:58:03 +01007502 }
Jens Axboe90554202020-09-03 12:12:41 -06007503
Xiaoguang Wang08369242020-11-03 14:15:59 +08007504 return ret;
7505}
7506
7507static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7508{
7509 struct io_ring_ctx *ctx;
7510 unsigned sq_thread_idle = 0;
7511
Pavel Begunkovc9dca272021-03-10 13:13:55 +00007512 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7513 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08007514 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06007515}
7516
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007517static bool io_sqd_handle_event(struct io_sq_data *sqd)
7518{
7519 bool did_sig = false;
7520 struct ksignal ksig;
7521
7522 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
7523 signal_pending(current)) {
7524 mutex_unlock(&sqd->lock);
7525 if (signal_pending(current))
7526 did_sig = get_signal(&ksig);
7527 cond_resched();
7528 mutex_lock(&sqd->lock);
7529 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007530 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
7531}
7532
Jens Axboe6c271ce2019-01-10 11:22:30 -07007533static int io_sq_thread(void *data)
7534{
Jens Axboe69fb2132020-09-14 11:16:23 -06007535 struct io_sq_data *sqd = data;
7536 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08007537 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007538 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08007539 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007540
Pavel Begunkov696ee882021-04-01 09:55:04 +01007541 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007542 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06007543
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007544 if (sqd->sq_cpu != -1)
7545 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
7546 else
7547 set_cpus_allowed_ptr(current, cpu_online_mask);
7548 current->flags |= PF_NO_SETAFFINITY;
7549
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007550 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007551 while (1) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01007552 bool cap_entries, sqt_spin = false;
Jens Axboec1edbf52019-11-10 16:56:04 -07007553
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007554 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
7555 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01007556 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08007557 timeout = jiffies + sqd->sq_thread_idle;
7558 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01007559
Jens Axboee95eee22020-09-08 09:11:32 -06007560 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06007561 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkov948e1942021-06-24 15:09:55 +01007562 int ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007563
Xiaoguang Wang08369242020-11-03 14:15:59 +08007564 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7565 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007566 }
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01007567 if (io_run_task_work())
7568 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007569
Xiaoguang Wang08369242020-11-03 14:15:59 +08007570 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06007571 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08007572 if (sqt_spin)
7573 timeout = jiffies + sqd->sq_thread_idle;
7574 continue;
7575 }
7576
Xiaoguang Wang08369242020-11-03 14:15:59 +08007577 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01007578 if (!io_sqd_events_pending(sqd) && !current->task_works) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01007579 bool needs_sched = true;
7580
Hao Xu724cb4f2021-04-21 23:19:11 +08007581 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01007582 io_ring_set_wakeup_flag(ctx);
7583
Hao Xu724cb4f2021-04-21 23:19:11 +08007584 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
7585 !list_empty_careful(&ctx->iopoll_list)) {
7586 needs_sched = false;
7587 break;
7588 }
7589 if (io_sqring_entries(ctx)) {
7590 needs_sched = false;
7591 break;
7592 }
7593 }
7594
7595 if (needs_sched) {
7596 mutex_unlock(&sqd->lock);
7597 schedule();
7598 mutex_lock(&sqd->lock);
7599 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007600 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7601 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007602 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08007603
7604 finish_wait(&sqd->wait, &wait);
7605 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007606 }
7607
Pavel Begunkov78cc6872021-06-14 02:36:23 +01007608 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007609 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07007610 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07007611 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007612 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01007613 mutex_unlock(&sqd->lock);
7614
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007615 complete(&sqd->exited);
7616 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07007617}
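/*
 * Userspace side of the SQPOLL wakeup handshake (minimal sketch using
 * the raw syscall; barriers and error handling omitted):
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *
 * where sq_flags is the mmap'ed ring flags word that
 * io_ring_set_wakeup_flag() sets once the thread decides to sleep.
 */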
7618
Jens Axboebda52162019-09-24 13:47:15 -06007619struct io_wait_queue {
7620 struct wait_queue_entry wq;
7621 struct io_ring_ctx *ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007622 unsigned cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007623 unsigned nr_timeouts;
7624};
7625
Pavel Begunkov6c503152021-01-04 20:36:36 +00007626static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06007627{
7628 struct io_ring_ctx *ctx = iowq->ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007629 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007630
7631 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007632 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007633 * started waiting. For timeouts, we always want to return to userspace,
7634 * regardless of event count.
7635 */
Jens Axboe5fd46172021-08-06 14:04:31 -06007636 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
Jens Axboebda52162019-09-24 13:47:15 -06007637}
7638
7639static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7640 int wake_flags, void *key)
7641{
7642 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7643 wq);
7644
Pavel Begunkov6c503152021-01-04 20:36:36 +00007645 /*
 7646	 * Cannot safely flush overflowed CQEs from here; ensure we wake up
7647 * the task, and the next invocation will do it.
7648 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007649 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
Pavel Begunkov6c503152021-01-04 20:36:36 +00007650 return autoremove_wake_function(curr, mode, wake_flags, key);
7651 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06007652}
7653
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007654static int io_run_task_work_sig(void)
7655{
7656 if (io_run_task_work())
7657 return 1;
7658 if (!signal_pending(current))
7659 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06007660 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06007661 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007662 return -EINTR;
7663}
7664
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007665/* when this returns >0, the caller should retry */
7666static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7667 struct io_wait_queue *iowq,
Pavel Begunkovc3222fd2023-01-05 10:49:15 +00007668 ktime_t *timeout)
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007669{
7670 int ret;
7671
7672 /* make sure we run task_work before checking for signals */
7673 ret = io_run_task_work_sig();
7674 if (ret || io_should_wake(iowq))
7675 return ret;
7676 /* let the caller flush overflows, retry */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007677 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007678 return 1;
7679
Pavel Begunkovc3222fd2023-01-05 10:49:15 +00007680 if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
Jens Axboe7c834372022-02-21 05:49:30 -07007681 return -ETIME;
7682 return 1;
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007683}
7684
Jens Axboe2b188cc2019-01-07 10:46:33 -07007685/*
7686 * Wait until events become available, if we don't already have some. The
7687 * application must reap them itself, as they reside on the shared cq ring.
7688 */
7689static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007690 const sigset_t __user *sig, size_t sigsz,
7691 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007692{
Pavel Begunkov902910992021-08-09 09:07:32 -06007693 struct io_wait_queue iowq;
Hristo Venev75b28af2019-08-26 17:23:46 +00007694 struct io_rings *rings = ctx->rings;
Jens Axboe7c834372022-02-21 05:49:30 -07007695 ktime_t timeout = KTIME_MAX;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007696 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007697
Jens Axboeb41e9852020-02-17 09:52:41 -07007698 do {
Pavel Begunkov90f67362021-08-09 20:18:12 +01007699 io_cqring_overflow_flush(ctx);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007700 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007701 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007702 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007703 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007704 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007705
Xiaoguang Wang44df58d2021-09-14 22:38:52 +08007706 if (uts) {
7707 struct timespec64 ts;
7708
7709 if (get_timespec64(&ts, uts))
7710 return -EFAULT;
Jens Axboe7c834372022-02-21 05:49:30 -07007711 timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
Xiaoguang Wang44df58d2021-09-14 22:38:52 +08007712 }
7713
Jens Axboe2b188cc2019-01-07 10:46:33 -07007714 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007715#ifdef CONFIG_COMPAT
7716 if (in_compat_syscall())
7717 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007718 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007719 else
7720#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007721 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007722
Jens Axboe2b188cc2019-01-07 10:46:33 -07007723 if (ret)
7724 return ret;
7725 }
7726
Pavel Begunkov902910992021-08-09 09:07:32 -06007727 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7728 iowq.wq.private = current;
7729 INIT_LIST_HEAD(&iowq.wq.entry);
7730 iowq.ctx = ctx;
Jens Axboebda52162019-09-24 13:47:15 -06007731 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Jens Axboe5fd46172021-08-06 14:04:31 -06007732 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
Pavel Begunkov902910992021-08-09 09:07:32 -06007733
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007734 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007735 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007736 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov90f67362021-08-09 20:18:12 +01007737 if (!io_cqring_overflow_flush(ctx)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007738 ret = -EBUSY;
7739 break;
7740 }
Pavel Begunkov311997b2021-06-14 23:37:28 +01007741 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
Jens Axboebda52162019-09-24 13:47:15 -06007742 TASK_INTERRUPTIBLE);
Pavel Begunkovc3222fd2023-01-05 10:49:15 +00007743 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
Pavel Begunkov311997b2021-06-14 23:37:28 +01007744 finish_wait(&ctx->cq_wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007745 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007746 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007747
Jens Axboeb7db41c2020-07-04 08:55:50 -06007748 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007749
Hristo Venev75b28af2019-08-26 17:23:46 +00007750 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007751}
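/*
 * Wait-loop summary: overflowed CQEs are flushed before the fast path
 * and before every sleep, and the wait fails with -EBUSY if they cannot
 * be. A user timeout is converted once to an absolute ktime, and
 * schedule_hrtimeout() turns expiry into -ETIME. The final return maps
 * to 0 whenever completions are available, however the wait ended.
 */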
7752
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007753static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007754{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007755 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007756
7757 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007758 kfree(table[i]);
7759 kfree(table);
7760}
7761
7762static void **io_alloc_page_table(size_t size)
7763{
7764 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7765 size_t init_size = size;
7766 void **table;
7767
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007768 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007769 if (!table)
7770 return NULL;
7771
7772 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov27f6b312021-06-15 13:20:13 +01007773 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007774
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007775 table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007776 if (!table[i]) {
7777 io_free_page_table(table, init_size);
7778 return NULL;
7779 }
7780 size -= this_size;
7781 }
7782 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007783}
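/*
 * io_alloc_page_table() splits one logical array into PAGE_SIZE chunks
 * so that large registrations never need high-order allocations; with
 * 4K pages and 8-byte entries that is 512 entries per chunk. The tag
 * lookup in io_get_tag_slot() below maps a flat index to a (table,
 * offset) pair using IO_RSRC_TAG_TABLE_SHIFT/_MASK.
 */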
7784
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007785static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7786{
7787 percpu_ref_exit(&ref_node->refs);
7788 kfree(ref_node);
7789}
7790
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007791static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
7792{
7793 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
7794 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
7795 unsigned long flags;
7796 bool first_add = false;
Dylan Yudaken82cc3382022-01-21 04:38:56 -08007797 unsigned long delay = HZ;
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007798
7799 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
7800 node->done = true;
7801
Dylan Yudaken82cc3382022-01-21 04:38:56 -08007802 /* if we are mid-quiesce then do not delay */
7803 if (node->rsrc_data->quiesce)
7804 delay = 0;
7805
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007806 while (!list_empty(&ctx->rsrc_ref_list)) {
7807 node = list_first_entry(&ctx->rsrc_ref_list,
7808 struct io_rsrc_node, node);
7809 /* recycle ref nodes in order */
7810 if (!node->done)
7811 break;
7812 list_del(&node->node);
7813 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
7814 }
7815 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
7816
7817 if (first_add)
Dylan Yudaken82cc3382022-01-21 04:38:56 -08007818 mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
Pavel Begunkovb9bd2be2021-08-09 09:09:47 -06007819}
7820
7821static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
7822{
7823 struct io_rsrc_node *ref_node;
7824
7825 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7826 if (!ref_node)
7827 return NULL;
7828
7829 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
7830 0, GFP_KERNEL)) {
7831 kfree(ref_node);
7832 return NULL;
7833 }
7834 INIT_LIST_HEAD(&ref_node->node);
7835 INIT_LIST_HEAD(&ref_node->rsrc_list);
7836 ref_node->done = false;
7837 return ref_node;
7838}
7839
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007840static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7841 struct io_rsrc_data *data_to_kill)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007842{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007843 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7844 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007845
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007846 if (data_to_kill) {
7847 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007848
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007849 rsrc_node->rsrc_data = data_to_kill;
Jens Axboe4956b9e2021-08-09 07:49:41 -06007850 spin_lock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007851 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
Jens Axboe4956b9e2021-08-09 07:49:41 -06007852 spin_unlock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007853
Pavel Begunkov3e942492021-04-11 01:46:34 +01007854 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007855 percpu_ref_kill(&rsrc_node->refs);
7856 ctx->rsrc_node = NULL;
7857 }
7858
7859 if (!ctx->rsrc_node) {
7860 ctx->rsrc_node = ctx->rsrc_backup_node;
7861 ctx->rsrc_backup_node = NULL;
7862 }
Pavel Begunkov1642b442020-12-30 21:34:14 +00007863}
7864
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007865static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007866{
7867 if (ctx->rsrc_backup_node)
7868 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007869 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007870 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7871}
7872
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007873static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007874{
7875 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007876
Pavel Begunkov215c3902021-04-01 15:43:48 +01007877	/* As we may drop ->uring_lock, another task may have started a quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007878 if (data->quiesce)
7879 return -ENXIO;
7880
7881 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007882 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007883 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007884 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007885 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007886 io_rsrc_node_switch(ctx, data);
7887
Pavel Begunkov3e942492021-04-11 01:46:34 +01007888 /* kill initial ref, already quiesced if zero */
7889 if (atomic_dec_and_test(&data->refs))
7890 break;
Jens Axboec018db42021-08-09 08:15:50 -06007891 mutex_unlock(&ctx->uring_lock);
Hao Xu8bad28d2021-02-19 17:19:36 +08007892 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007893 ret = wait_for_completion_interruptible(&data->done);
Jens Axboec018db42021-08-09 08:15:50 -06007894 if (!ret) {
7895 mutex_lock(&ctx->uring_lock);
Dylan Yudaken0d773aa2022-02-22 08:17:51 -08007896 if (atomic_read(&data->refs) > 0) {
7897 /*
7898 * it has been revived by another thread while
7899 * we were unlocked
7900 */
7901 mutex_unlock(&ctx->uring_lock);
7902 } else {
7903 break;
7904 }
Jens Axboec018db42021-08-09 08:15:50 -06007905 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007906
Pavel Begunkov3e942492021-04-11 01:46:34 +01007907 atomic_inc(&data->refs);
 7908		/* wait for all work items that may be completing data->done */
7909 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007910 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007911
Hao Xu8bad28d2021-02-19 17:19:36 +08007912 ret = io_run_task_work_sig();
7913 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007914 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007915 data->quiesce = false;
7916
Hao Xu8bad28d2021-02-19 17:19:36 +08007917 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007918}
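/*
 * Quiesce protocol in brief: switch the ctx to the pre-allocated backup
 * rsrc node, kill the old node's percpu ref, drop the data's initial
 * reference, then wait for ->done with ->uring_lock released. After
 * relocking, data->refs is re-checked: another thread may have taken a
 * reference while the lock was dropped, in which case the whole
 * sequence is retried; a pending signal likewise restarts it after the
 * initial reference has been re-raised.
 */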
7919
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007920static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7921{
7922 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7923 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7924
7925 return &data->tags[table_idx][off];
7926}
7927
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007928static void io_rsrc_data_free(struct io_rsrc_data *data)
7929{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007930 size_t size = data->nr * sizeof(data->tags[0][0]);
7931
7932 if (data->tags)
7933 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007934 kfree(data);
7935}
7936
Pavel Begunkovd878c812021-06-14 02:36:18 +01007937static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7938 u64 __user *utags, unsigned nr,
7939 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007940{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007941 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007942 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007943 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007944
7945 data = kzalloc(sizeof(*data), GFP_KERNEL);
7946 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007947 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007948 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007949 if (!data->tags) {
7950 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007951 return -ENOMEM;
7952 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007953
7954 data->nr = nr;
7955 data->ctx = ctx;
7956 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007957 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007958 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007959 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01007960 u64 *tag_slot = io_get_tag_slot(data, i);
7961
7962 if (copy_from_user(tag_slot, &utags[i],
7963 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007964 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007965 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007966 }
7967
Pavel Begunkov3e942492021-04-11 01:46:34 +01007968 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007969 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007970 *pdata = data;
7971 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007972fail:
7973 io_rsrc_data_free(data);
7974 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007975}
7976
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007977static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7978{
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01007979 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
7980 GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007981 return !!table->files;
7982}
7983
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007984static void io_free_file_tables(struct io_file_table *table)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007985{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007986 kvfree(table->files);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007987 table->files = NULL;
7988}
7989
Jens Axboe2b188cc2019-01-07 10:46:33 -07007990static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7991{
7992#if defined(CONFIG_UNIX)
7993 if (ctx->ring_sock) {
7994 struct sock *sock = ctx->ring_sock->sk;
7995 struct sk_buff *skb;
7996
7997 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7998 kfree_skb(skb);
7999 }
8000#else
8001 int i;
8002
8003 for (i = 0; i < ctx->nr_user_files; i++) {
8004 struct file *file;
8005
8006 file = io_file_from_index(ctx, i);
8007 if (file)
8008 fput(file);
8009 }
8010#endif
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008011 io_free_file_tables(&ctx->file_table);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008012 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01008013 ctx->file_data = NULL;
8014 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00008015}
8016
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008017static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
8018{
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01008019 unsigned nr = ctx->nr_user_files;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008020 int ret;
8021
Pavel Begunkov08480402021-04-13 02:58:38 +01008022 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008023 return -ENXIO;
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01008024
8025 /*
8026 * Quiesce may unlock ->uring_lock, and while it's not held
8027	 * prevent new requests from using the table.
8028 */
8029 ctx->nr_user_files = 0;
Pavel Begunkov08480402021-04-13 02:58:38 +01008030 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01008031 ctx->nr_user_files = nr;
Pavel Begunkov08480402021-04-13 02:58:38 +01008032 if (!ret)
8033 __io_sqe_files_unregister(ctx);
8034 return ret;
Jens Axboe6b063142019-01-10 22:13:58 -07008035}
8036
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008037static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008038 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008039{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008040 WARN_ON_ONCE(sqd->thread == current);
8041
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008042 /*
8043	 * Do the full dance rather than a conditional clear_bit(), which would
8044	 * race with other threads incrementing park_pending and setting the bit.
8045 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008046 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008047 if (atomic_dec_return(&sqd->park_pending))
8048 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008049 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008050}
8051
Jens Axboe86e0d672021-03-05 08:44:39 -07008052static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008053 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008054{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008055 WARN_ON_ONCE(sqd->thread == current);
8056
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008057 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008058 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008059 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07008060 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07008061 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008062}
8063
8064static void io_sq_thread_stop(struct io_sq_data *sqd)
8065{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008066 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01008067 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008068
Jens Axboe05962f92021-03-06 13:58:48 -07008069 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01008070 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07008071 if (sqd->thread)
8072 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008073 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07008074 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008075}
8076
Jens Axboe534ca6d2020-09-02 13:52:19 -06008077static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07008078{
Jens Axboe534ca6d2020-09-02 13:52:19 -06008079 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008080 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
8081
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008082 io_sq_thread_stop(sqd);
8083 kfree(sqd);
8084 }
8085}
8086
8087static void io_sq_thread_finish(struct io_ring_ctx *ctx)
8088{
8089 struct io_sq_data *sqd = ctx->sq_data;
8090
8091 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07008092 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008093 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008094 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07008095 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008096
8097 io_put_sq_data(sqd);
8098 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008099 }
8100}
8101
Jens Axboeaa061652020-09-02 14:50:27 -06008102static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
8103{
8104 struct io_ring_ctx *ctx_attach;
8105 struct io_sq_data *sqd;
8106 struct fd f;
8107
8108 f = fdget(p->wq_fd);
8109 if (!f.file)
8110 return ERR_PTR(-ENXIO);
8111 if (f.file->f_op != &io_uring_fops) {
8112 fdput(f);
8113 return ERR_PTR(-EINVAL);
8114 }
8115
8116 ctx_attach = f.file->private_data;
8117 sqd = ctx_attach->sq_data;
8118 if (!sqd) {
8119 fdput(f);
8120 return ERR_PTR(-EINVAL);
8121 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07008122 if (sqd->task_tgid != current->tgid) {
8123 fdput(f);
8124 return ERR_PTR(-EPERM);
8125 }
Jens Axboeaa061652020-09-02 14:50:27 -06008126
8127 refcount_inc(&sqd->refs);
8128 fdput(f);
8129 return sqd;
8130}
8131
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008132static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
8133 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06008134{
8135 struct io_sq_data *sqd;
8136
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008137 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008138 if (p->flags & IORING_SETUP_ATTACH_WQ) {
8139 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008140 if (!IS_ERR(sqd)) {
8141 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008142 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008143 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07008144		/* fall through for EPERM case, set up new sqd/task */
8145 if (PTR_ERR(sqd) != -EPERM)
8146 return sqd;
8147 }
Jens Axboeaa061652020-09-02 14:50:27 -06008148
Jens Axboe534ca6d2020-09-02 13:52:19 -06008149 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
8150 if (!sqd)
8151 return ERR_PTR(-ENOMEM);
8152
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008153 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008154 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06008155 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008156 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008157 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008158 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008159 return sqd;
8160}
8161
Jens Axboe6b063142019-01-10 22:13:58 -07008162#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07008163/*
8164 * Ensure the UNIX gc is aware of our file set, so we are certain that
8165 * the io_uring can be safely unregistered on process exit, even if we have
8166 * loops in the file referencing.
8167 */
8168static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
8169{
8170 struct sock *sk = ctx->ring_sock->sk;
8171 struct scm_fp_list *fpl;
8172 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06008173 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07008174
Jens Axboe6b063142019-01-10 22:13:58 -07008175 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
8176 if (!fpl)
8177 return -ENOMEM;
8178
8179 skb = alloc_skb(0, GFP_KERNEL);
8180 if (!skb) {
8181 kfree(fpl);
8182 return -ENOMEM;
8183 }
8184
8185 skb->sk = sk;
Pavel Begunkov813d8fe2022-10-16 22:42:54 +01008186 skb->scm_io_uring = 1;
Jens Axboe6b063142019-01-10 22:13:58 -07008187
Jens Axboe08a45172019-10-03 08:11:03 -06008188 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07008189 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07008190 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06008191 struct file *file = io_file_from_index(ctx, i + offset);
8192
8193 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06008194 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06008195 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06008196 unix_inflight(fpl->user, fpl->fp[nr_files]);
8197 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07008198 }
8199
Jens Axboe08a45172019-10-03 08:11:03 -06008200 if (nr_files) {
8201 fpl->max = SCM_MAX_FD;
8202 fpl->count = nr_files;
8203 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008204 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06008205 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
8206 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07008207
Pavel Begunkov285f5d72022-04-06 12:43:58 +01008208 for (i = 0; i < nr; i++) {
8209 struct file *file = io_file_from_index(ctx, i + offset);
8210
8211 if (file)
8212 fput(file);
8213 }
Jens Axboe08a45172019-10-03 08:11:03 -06008214 } else {
8215 kfree_skb(skb);
Pavel Begunkov0853bd62022-03-25 16:36:31 +00008216 free_uid(fpl->user);
Jens Axboe08a45172019-10-03 08:11:03 -06008217 kfree(fpl);
8218 }
Jens Axboe6b063142019-01-10 22:13:58 -07008219
8220 return 0;
8221}
8222
8223/*
8224 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
8225 * causes regular reference counting to break down. We rely on the UNIX
8226 * garbage collection to take care of this problem for us.
8227 */
8228static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8229{
8230 unsigned left, total;
8231 int ret = 0;
8232
8233 total = 0;
8234 left = ctx->nr_user_files;
8235 while (left) {
8236 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07008237
8238 ret = __io_sqe_files_scm(ctx, this_files, total);
8239 if (ret)
8240 break;
8241 left -= this_files;
8242 total += this_files;
8243 }
8244
8245 if (!ret)
8246 return 0;
8247
8248 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06008249 struct file *file = io_file_from_index(ctx, total);
8250
8251 if (file)
8252 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07008253 total++;
8254 }
8255
8256 return ret;
8257}
8258#else
8259static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8260{
8261 return 0;
8262}
8263#endif
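/*
 * A minimal sketch of the batching pattern io_sqe_files_scm() uses above:
 * walk a large set in fixed-size chunks (SCM_MAX_FD at a time there),
 * stop on the first failure and let the caller unwind what was already
 * done. The names here are illustrative, not kernel API.
 */
#if 0
static int demo_process_in_batches(unsigned int total, unsigned int batch_max,
				   int (*fn)(unsigned int off, unsigned int cnt))
{
	unsigned int done = 0;

	while (done < total) {
		unsigned int cnt = min(total - done, batch_max);
		int ret = fn(done, cnt);

		if (ret)
			return ret;	/* caller unwinds [0, done) */
		done += cnt;
	}
	return 0;
}
#endif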
8264
Pavel Begunkov47e90392021-04-01 15:43:56 +01008265static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06008266{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00008267 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06008268#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06008269 struct sock *sock = ctx->ring_sock->sk;
8270 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8271 struct sk_buff *skb;
8272 int i;
8273
8274 __skb_queue_head_init(&list);
8275
8276 /*
8277 * Find the skb that holds this file in its SCM_RIGHTS. When found,
8278 * remove this entry and rearrange the file array.
8279 */
8280 skb = skb_dequeue(head);
8281 while (skb) {
8282 struct scm_fp_list *fp;
8283
8284 fp = UNIXCB(skb).fp;
8285 for (i = 0; i < fp->count; i++) {
8286 int left;
8287
8288 if (fp->fp[i] != file)
8289 continue;
8290
8291 unix_notinflight(fp->user, fp->fp[i]);
8292 left = fp->count - 1 - i;
8293 if (left) {
8294 memmove(&fp->fp[i], &fp->fp[i + 1],
8295 left * sizeof(struct file *));
8296 }
8297 fp->count--;
8298 if (!fp->count) {
8299 kfree_skb(skb);
8300 skb = NULL;
8301 } else {
8302 __skb_queue_tail(&list, skb);
8303 }
8304 fput(file);
8305 file = NULL;
8306 break;
8307 }
8308
8309 if (!file)
8310 break;
8311
8312 __skb_queue_tail(&list, skb);
8313
8314 skb = skb_dequeue(head);
8315 }
8316
8317 if (skb_peek(&list)) {
8318 spin_lock_irq(&head->lock);
8319 while ((skb = __skb_dequeue(&list)) != NULL)
8320 __skb_queue_tail(head, skb);
8321 spin_unlock_irq(&head->lock);
8322 }
8323#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07008324 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008325#endif
8326}
8327
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008328static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07008329{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008330 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008331 struct io_ring_ctx *ctx = rsrc_data->ctx;
8332 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008333
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008334 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
8335 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008336
8337 if (prsrc->tag) {
8338 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008339
8340 io_ring_submit_lock(ctx, lock_ring);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008341 spin_lock(&ctx->completion_lock);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01008342 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008343 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008344 spin_unlock(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008345 io_cqring_ev_posted(ctx);
8346 io_ring_submit_unlock(ctx, lock_ring);
8347 }
8348
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01008349 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008350 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008351 }
8352
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01008353 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01008354 if (atomic_dec_and_test(&rsrc_data->refs))
8355 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008356}
8357
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008358static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06008359{
8360 struct io_ring_ctx *ctx;
8361 struct llist_node *node;
8362
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008363 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
8364 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06008365
8366 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008367 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06008368 struct llist_node *next = node->next;
8369
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008370 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008371 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06008372 node = next;
8373 }
8374}
8375
Jens Axboe05f3fb32019-12-09 11:22:50 -07008376static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01008377 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07008378{
8379 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008380 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008381 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01008382 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008383
8384 if (ctx->file_data)
8385 return -EBUSY;
8386 if (!nr_args)
8387 return -EINVAL;
8388 if (nr_args > IORING_MAX_FIXED_FILES)
8389 return -EMFILE;
Pavel Begunkov3a1b8a42021-08-20 10:36:35 +01008390 if (nr_args > rlimit(RLIMIT_NOFILE))
8391 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008392 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008393 if (ret)
8394 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008395 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
8396 &ctx->file_data);
8397 if (ret)
8398 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008399
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008400 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008401 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008402 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008403
Jens Axboe05f3fb32019-12-09 11:22:50 -07008404 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01008405 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008406 ret = -EFAULT;
8407 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008408 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008409 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01008410 if (fd == -1) {
8411 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008412 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01008413 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008414 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01008415 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008416
Jens Axboe05f3fb32019-12-09 11:22:50 -07008417 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008418 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01008419 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008420 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008421
8422 /*
8423 * Don't allow io_uring instances to be registered. If UNIX
8424 * isn't enabled, then this causes a reference cycle and this
8425 * instance can never get freed. If UNIX is enabled we'll
8426 * handle it just fine, but there's still no point in allowing
8427 * a ring fd as it doesn't support regular read/write anyway.
8428 */
8429 if (file->f_op == &io_uring_fops) {
8430 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008431 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008432 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008433 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008434 }
8435
Jens Axboe05f3fb32019-12-09 11:22:50 -07008436 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008437 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01008438 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008439 return ret;
8440 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008441
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008442 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008443 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008444out_fput:
8445 for (i = 0; i < ctx->nr_user_files; i++) {
8446 file = io_file_from_index(ctx, i);
8447 if (file)
8448 fput(file);
8449 }
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008450 io_free_file_tables(&ctx->file_table);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008451 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008452out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008453 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06008454 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008455 return ret;
8456}
8457
Jens Axboec3a31e62019-10-03 13:59:56 -06008458static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
8459 int index)
8460{
8461#if defined(CONFIG_UNIX)
8462 struct sock *sock = ctx->ring_sock->sk;
8463 struct sk_buff_head *head = &sock->sk_receive_queue;
8464 struct sk_buff *skb;
8465
8466 /*
8467 * See if we can merge this file into an existing skb SCM_RIGHTS
8468 * file set. If there's no room, fall back to allocating a new skb
8469 * and filling it in.
8470 */
8471 spin_lock_irq(&head->lock);
8472 skb = skb_peek(head);
8473 if (skb) {
8474 struct scm_fp_list *fpl = UNIXCB(skb).fp;
8475
8476 if (fpl->count < SCM_MAX_FD) {
8477 __skb_unlink(skb, head);
8478 spin_unlock_irq(&head->lock);
8479 fpl->fp[fpl->count] = get_file(file);
8480 unix_inflight(fpl->user, fpl->fp[fpl->count]);
8481 fpl->count++;
8482 spin_lock_irq(&head->lock);
8483 __skb_queue_head(head, skb);
8484 } else {
8485 skb = NULL;
8486 }
8487 }
8488 spin_unlock_irq(&head->lock);
8489
8490 if (skb) {
8491 fput(file);
8492 return 0;
8493 }
8494
8495 return __io_sqe_files_scm(ctx, 1, index);
8496#else
8497 return 0;
8498#endif
8499}
8500
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008501static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
8502 struct io_rsrc_node *node, void *rsrc)
8503{
Pavel Begunkov5218d5c2022-04-07 14:05:04 +01008504 u64 *tag_slot = io_get_tag_slot(data, idx);
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008505 struct io_rsrc_put *prsrc;
8506
8507 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
8508 if (!prsrc)
8509 return -ENOMEM;
8510
Pavel Begunkov5218d5c2022-04-07 14:05:04 +01008511 prsrc->tag = *tag_slot;
8512 *tag_slot = 0;
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008513 prsrc->rsrc = rsrc;
8514 list_add(&prsrc->list, &node->rsrc_list);
8515 return 0;
8516}
8517
Pavel Begunkovb9445592021-08-25 12:25:45 +01008518static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8519 unsigned int issue_flags, u32 slot_index)
8520{
8521 struct io_ring_ctx *ctx = req->ctx;
8522 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008523 bool needs_switch = false;
Pavel Begunkovb9445592021-08-25 12:25:45 +01008524 struct io_fixed_file *file_slot;
8525 int ret = -EBADF;
8526
8527 io_ring_submit_lock(ctx, !force_nonblock);
8528 if (file->f_op == &io_uring_fops)
8529 goto err;
8530 ret = -ENXIO;
8531 if (!ctx->file_data)
8532 goto err;
8533 ret = -EINVAL;
8534 if (slot_index >= ctx->nr_user_files)
8535 goto err;
8536
8537 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
8538 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008539
8540 if (file_slot->file_ptr) {
8541 struct file *old_file;
8542
8543 ret = io_rsrc_node_switch_start(ctx);
8544 if (ret)
8545 goto err;
8546
8547 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8548 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
8549 ctx->rsrc_node, old_file);
8550 if (ret)
8551 goto err;
8552 file_slot->file_ptr = 0;
8553 needs_switch = true;
8554 }
Pavel Begunkovb9445592021-08-25 12:25:45 +01008555
8556 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
8557 io_fixed_file_set(file_slot, file);
8558 ret = io_sqe_file_register(ctx, file, slot_index);
8559 if (ret) {
8560 file_slot->file_ptr = 0;
8561 goto err;
8562 }
8563
8564 ret = 0;
8565err:
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008566 if (needs_switch)
8567 io_rsrc_node_switch(ctx, ctx->file_data);
Pavel Begunkovb9445592021-08-25 12:25:45 +01008568 io_ring_submit_unlock(ctx, !force_nonblock);
8569 if (ret)
8570 fput(file);
8571 return ret;
8572}
8573
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008574static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
8575{
8576 unsigned int offset = req->close.file_slot - 1;
8577 struct io_ring_ctx *ctx = req->ctx;
8578 struct io_fixed_file *file_slot;
8579 struct file *file;
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008580 int ret;
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008581
8582 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8583 ret = -ENXIO;
8584 if (unlikely(!ctx->file_data))
8585 goto out;
8586 ret = -EINVAL;
8587 if (offset >= ctx->nr_user_files)
8588 goto out;
8589 ret = io_rsrc_node_switch_start(ctx);
8590 if (ret)
8591 goto out;
8592
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008593 offset = array_index_nospec(offset, ctx->nr_user_files);
8594 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008595 ret = -EBADF;
8596 if (!file_slot->file_ptr)
8597 goto out;
8598
8599 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8600 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
8601 if (ret)
8602 goto out;
8603
8604 file_slot->file_ptr = 0;
8605 io_rsrc_node_switch(ctx, ctx->file_data);
8606 ret = 0;
8607out:
8608 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8609 return ret;
8610}
8611
Jens Axboe05f3fb32019-12-09 11:22:50 -07008612static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008613 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07008614 unsigned nr_args)
8615{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008616 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008617 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008618 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008619 struct io_fixed_file *file_slot;
8620 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008621 int fd, i, err = 0;
8622 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008623 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06008624
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008625 if (!ctx->file_data)
8626 return -ENXIO;
8627 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06008628 return -EINVAL;
8629
Pavel Begunkov67973b92021-01-26 13:51:09 +00008630 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008631 u64 tag = 0;
8632
8633 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
8634 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06008635 err = -EFAULT;
8636 break;
8637 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008638 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
8639 err = -EINVAL;
8640 break;
8641 }
noah4e0377a2021-01-26 15:23:28 -05008642 if (fd == IORING_REGISTER_FILES_SKIP)
8643 continue;
8644
Pavel Begunkov67973b92021-01-26 13:51:09 +00008645 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008646 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00008647
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008648 if (file_slot->file_ptr) {
8649 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008650 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08008651 if (err)
8652 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008653 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008654 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06008655 }
8656 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06008657 file = fget(fd);
8658 if (!file) {
8659 err = -EBADF;
8660 break;
8661 }
8662 /*
8663 * Don't allow io_uring instances to be registered. If
8664 * UNIX isn't enabled, then this causes a reference
8665 * cycle and this instance can never get freed. If UNIX
8666 * is enabled we'll handle it just fine, but there's
8667 * still no point in allowing a ring fd as it doesn't
8668 * support regular read/write anyway.
8669 */
8670 if (file->f_op == &io_uring_fops) {
8671 fput(file);
8672 err = -EBADF;
8673 break;
8674 }
Pavel Begunkov50c981b2022-04-06 12:43:57 +01008675 *io_get_tag_slot(data, i) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01008676 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008677 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008678 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008679 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008680 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008681 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008682 }
Jens Axboec3a31e62019-10-03 13:59:56 -06008683 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008684 }
8685
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008686 if (needs_switch)
8687 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06008688 return done ? done : err;
8689}
Xiaoguang Wang05589552020-03-31 14:05:18 +08008690
Jens Axboe685fe7f2021-03-08 09:37:51 -07008691static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
8692 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03008693{
Jens Axboee9418942021-02-19 12:33:30 -07008694 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008695 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008696 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008697
Yang Yingliang362a9e62021-07-20 16:38:05 +08008698 mutex_lock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008699 hash = ctx->hash_map;
8700 if (!hash) {
8701 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
Yang Yingliang362a9e62021-07-20 16:38:05 +08008702 if (!hash) {
8703 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008704 return ERR_PTR(-ENOMEM);
Yang Yingliang362a9e62021-07-20 16:38:05 +08008705 }
Jens Axboee9418942021-02-19 12:33:30 -07008706 refcount_set(&hash->refs, 1);
8707 init_waitqueue_head(&hash->wait);
8708 ctx->hash_map = hash;
8709 }
Yang Yingliang362a9e62021-07-20 16:38:05 +08008710 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008711
8712 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07008713 data.task = task;
Pavel Begunkovebc11b62021-08-09 13:04:05 +01008714 data.free_work = io_wq_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03008715 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008716
Jens Axboed25e3a32021-02-16 11:41:41 -07008717	/* Do QD, or 4 * CPUS, whichever is smaller */
8718 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03008719
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008720 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03008721}
8722
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008723static int io_uring_alloc_task_context(struct task_struct *task,
8724 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008725{
8726 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06008727 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06008728
Pavel Begunkov09899b12021-06-14 02:36:22 +01008729 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06008730 if (unlikely(!tctx))
8731 return -ENOMEM;
8732
Jens Axboed8a6df12020-10-15 16:24:45 -06008733 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8734 if (unlikely(ret)) {
8735 kfree(tctx);
8736 return ret;
8737 }
8738
Jens Axboe685fe7f2021-03-08 09:37:51 -07008739 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008740 if (IS_ERR(tctx->io_wq)) {
8741 ret = PTR_ERR(tctx->io_wq);
8742 percpu_counter_destroy(&tctx->inflight);
8743 kfree(tctx);
8744 return ret;
8745 }
8746
Jens Axboe0f212202020-09-13 13:09:39 -06008747 xa_init(&tctx->xa);
8748 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008749 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01008750 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06008751 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00008752 spin_lock_init(&tctx->task_lock);
8753 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00008754 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06008755 return 0;
8756}
8757
8758void __io_uring_free(struct task_struct *tsk)
8759{
8760 struct io_uring_task *tctx = tsk->io_uring;
8761
8762 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008763 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01008764 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008765
Jens Axboed8a6df12020-10-15 16:24:45 -06008766 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008767 kfree(tctx);
8768 tsk->io_uring = NULL;
8769}
8770
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008771static int io_sq_offload_create(struct io_ring_ctx *ctx,
8772 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008773{
8774 int ret;
8775
Jens Axboed25e3a32021-02-16 11:41:41 -07008776 /* Retain compatibility with failing for an invalid attach attempt */
8777 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8778 IORING_SETUP_ATTACH_WQ) {
8779 struct fd f;
8780
8781 f = fdget(p->wq_fd);
8782 if (!f.file)
8783 return -ENXIO;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008784 if (f.file->f_op != &io_uring_fops) {
8785 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008786 return -EINVAL;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008787 }
8788 fdput(f);
Jens Axboed25e3a32021-02-16 11:41:41 -07008789 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07008790 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07008791 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008792 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008793 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008794
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008795 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008796 if (IS_ERR(sqd)) {
8797 ret = PTR_ERR(sqd);
8798 goto err;
8799 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008800
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01008801 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06008802 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06008803 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8804 if (!ctx->sq_thread_idle)
8805 ctx->sq_thread_idle = HZ;
8806
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008807 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008808 list_add(&ctx->sqd_list, &sqd->ctx_list);
8809 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008810 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008811 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008812 io_sq_thread_unpark(sqd);
8813
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008814 if (ret < 0)
8815 goto err;
8816 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008817 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008818
Jens Axboe6c271ce2019-01-10 11:22:30 -07008819 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008820 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008821
Jens Axboe917257d2019-04-13 09:28:55 -06008822 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008823 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008824 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008825 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008826 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008827 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008828 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008829
8830 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008831 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008832 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8833 if (IS_ERR(tsk)) {
8834 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008835 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008836 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008837
Jens Axboe46fe18b2021-03-04 12:39:36 -07008838 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008839 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008840 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008841 if (ret)
8842 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008843 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8844 /* Can't have SQ_AFF without SQPOLL */
8845 ret = -EINVAL;
8846 goto err;
8847 }
8848
Jens Axboe2b188cc2019-01-07 10:46:33 -07008849 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008850err_sqpoll:
8851 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008852err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008853 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008854 return ret;
8855}
8856
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008857static inline void __io_unaccount_mem(struct user_struct *user,
8858 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008859{
8860 atomic_long_sub(nr_pages, &user->locked_vm);
8861}
8862
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008863static inline int __io_account_mem(struct user_struct *user,
8864 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008865{
8866 unsigned long page_limit, cur_pages, new_pages;
8867
8868 /* Don't allow more pages than we can safely lock */
8869 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8870
8871 do {
8872 cur_pages = atomic_long_read(&user->locked_vm);
8873 new_pages = cur_pages + nr_pages;
8874 if (new_pages > page_limit)
8875 return -ENOMEM;
8876 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8877 new_pages) != cur_pages);
8878
8879 return 0;
8880}
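/*
 * The loop above is the classic lock-free "add under a limit" pattern. A
 * self-contained user-space equivalent built on C11 atomics (all names
 * here are illustrative) could look like this:
 */
#if 0
#include <stdatomic.h>

static int demo_account_pages(atomic_long *locked, long nr_pages, long limit)
{
	long cur, new;

	do {
		cur = atomic_load(locked);
		new = cur + nr_pages;
		if (new > limit)
			return -1;	/* would exceed the limit */
	} while (!atomic_compare_exchange_weak(locked, &cur, new));

	return 0;
}
#endif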
8881
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008882static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008883{
Jens Axboe62e398b2021-02-21 16:19:37 -07008884 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008885 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008886
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008887 if (ctx->mm_account)
8888 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008889}
8890
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008891static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008892{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008893 int ret;
8894
Jens Axboe62e398b2021-02-21 16:19:37 -07008895 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008896 ret = __io_account_mem(ctx->user, nr_pages);
8897 if (ret)
8898 return ret;
8899 }
8900
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008901 if (ctx->mm_account)
8902 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008903
8904 return 0;
8905}
8906
Jens Axboe2b188cc2019-01-07 10:46:33 -07008907static void io_mem_free(void *ptr)
8908{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008909 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008910
Mark Rutland52e04ef2019-04-30 17:30:21 +01008911 if (!ptr)
8912 return;
8913
8914 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008915 if (put_page_testzero(page))
8916 free_compound_page(page);
8917}
8918
8919static void *io_mem_alloc(size_t size)
8920{
Shakeel Butt246dfbc2022-01-24 21:17:36 -08008921 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008922
Shakeel Butt246dfbc2022-01-24 21:17:36 -08008923 return (void *) __get_free_pages(gfp, get_order(size));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008924}
8925
Hristo Venev75b28af2019-08-26 17:23:46 +00008926static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8927 size_t *sq_offset)
8928{
8929 struct io_rings *rings;
8930 size_t off, sq_array_size;
8931
8932 off = struct_size(rings, cqes, cq_entries);
8933 if (off == SIZE_MAX)
8934 return SIZE_MAX;
8935
8936#ifdef CONFIG_SMP
8937 off = ALIGN(off, SMP_CACHE_BYTES);
8938 if (off == 0)
8939 return SIZE_MAX;
8940#endif
8941
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008942 if (sq_offset)
8943 *sq_offset = off;
8944
Hristo Venev75b28af2019-08-26 17:23:46 +00008945 sq_array_size = array_size(sizeof(u32), sq_entries);
8946 if (sq_array_size == SIZE_MAX)
8947 return SIZE_MAX;
8948
8949 if (check_add_overflow(off, sq_array_size, &off))
8950 return SIZE_MAX;
8951
Hristo Venev75b28af2019-08-26 17:23:46 +00008952 return off;
8953}
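/*
 * Worked example (a sketch; sizes assumed, not taken from this file):
 * with cq_entries == 128 and sq_entries == 64, and assuming
 * sizeof(struct io_uring_cqe) == 16, the CQE array adds 128 * 16 = 2048
 * bytes to the ring header; after cache-line alignment, sq_offset marks
 * where the SQ index array starts, which then adds another
 * 64 * sizeof(u32) = 256 bytes.
 */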
8954
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008955static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008956{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008957 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008958 unsigned int i;
8959
Pavel Begunkov62248432021-04-28 13:11:29 +01008960 if (imu != ctx->dummy_ubuf) {
8961 for (i = 0; i < imu->nr_bvecs; i++)
8962 unpin_user_page(imu->bvec[i].bv_page);
8963 if (imu->acct_pages)
8964 io_unaccount_mem(ctx, imu->acct_pages);
8965 kvfree(imu);
8966 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008967 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008968}
8969
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008970static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8971{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008972 io_buffer_unmap(ctx, &prsrc->buf);
8973 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008974}
8975
8976static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008977{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008978 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008979
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008980 for (i = 0; i < ctx->nr_user_bufs; i++)
8981 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008982 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08008983 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07008984 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008985 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008986 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008987}
8988
Jens Axboeedafcce2019-01-09 09:16:05 -07008989static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8990{
Pavel Begunkov91f5a602022-06-13 06:30:06 +01008991 unsigned nr = ctx->nr_user_bufs;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008992 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008993
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008994 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07008995 return -ENXIO;
8996
Pavel Begunkov91f5a602022-06-13 06:30:06 +01008997 /*
8998 * Quiesce may unlock ->uring_lock, and while it's not held
8999	 * prevent new requests from using the table.
9000 */
9001 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009002 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
Pavel Begunkov91f5a602022-06-13 06:30:06 +01009003 ctx->nr_user_bufs = nr;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009004 if (!ret)
9005 __io_sqe_buffers_unregister(ctx);
9006 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07009007}
9008
9009static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
9010 void __user *arg, unsigned index)
9011{
9012 struct iovec __user *src;
9013
9014#ifdef CONFIG_COMPAT
9015 if (ctx->compat) {
9016 struct compat_iovec __user *ciovs;
9017 struct compat_iovec ciov;
9018
9019 ciovs = (struct compat_iovec __user *) arg;
9020 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
9021 return -EFAULT;
9022
Jens Axboed55e5f52019-12-11 16:12:15 -07009023 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07009024 dst->iov_len = ciov.iov_len;
9025 return 0;
9026 }
9027#endif
9028 src = (struct iovec __user *) arg;
9029 if (copy_from_user(dst, &src[index], sizeof(*dst)))
9030 return -EFAULT;
9031 return 0;
9032}
9033
Jens Axboede293932020-09-17 16:19:16 -06009034/*
9035 * Not super efficient, but this only runs at registration time. And we do cache
9036 * the last compound head, so generally we'll only do a full search if we don't
9037 * match that one.
9038 *
9039 * We check if the given compound head page has already been accounted, to
9040 * avoid double accounting it. This allows us to account the full size of the
9041 * page, not just the constituent pages of a huge page.
9042 */
9043static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
9044 int nr_pages, struct page *hpage)
9045{
9046 int i, j;
9047
9048 /* check current page array */
9049 for (i = 0; i < nr_pages; i++) {
9050 if (!PageCompound(pages[i]))
9051 continue;
9052 if (compound_head(pages[i]) == hpage)
9053 return true;
9054 }
9055
9056 /* check previously registered pages */
9057 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009058 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06009059
9060 for (j = 0; j < imu->nr_bvecs; j++) {
9061 if (!PageCompound(imu->bvec[j].bv_page))
9062 continue;
9063 if (compound_head(imu->bvec[j].bv_page) == hpage)
9064 return true;
9065 }
9066 }
9067
9068 return false;
9069}
9070
9071static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
9072 int nr_pages, struct io_mapped_ubuf *imu,
9073 struct page **last_hpage)
9074{
9075 int i, ret;
9076
Pavel Begunkov216e5832021-05-29 12:01:02 +01009077 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06009078 for (i = 0; i < nr_pages; i++) {
9079 if (!PageCompound(pages[i])) {
9080 imu->acct_pages++;
9081 } else {
9082 struct page *hpage;
9083
9084 hpage = compound_head(pages[i]);
9085 if (hpage == *last_hpage)
9086 continue;
9087 *last_hpage = hpage;
9088 if (headpage_already_acct(ctx, pages, i, hpage))
9089 continue;
9090 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
9091 }
9092 }
9093
9094 if (!imu->acct_pages)
9095 return 0;
9096
Jens Axboe26bfa89e2021-02-09 20:14:12 -07009097 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06009098 if (ret)
9099 imu->acct_pages = 0;
9100 return ret;
9101}
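/*
 * A simplified sketch of the huge-page accounting above (it skips the
 * headpage_already_acct() check against earlier registrations): normal
 * pages count once each, while all tails of one compound page are
 * charged as a single head at its full size.
 */
#if 0
static unsigned long demo_acct_pages(struct page **pages, int nr)
{
	struct page *last_head = NULL;
	unsigned long acct = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (!PageCompound(pages[i])) {
			acct++;				/* plain page */
			continue;
		}
		if (compound_head(pages[i]) == last_head)
			continue;			/* already charged */
		last_head = compound_head(pages[i]);
		acct += page_size(last_head) >> PAGE_SHIFT;
	}
	return acct;
}
#endif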
9102
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009103static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009104 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009105 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07009106{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009107 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07009108 struct vm_area_struct **vmas = NULL;
9109 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009110 unsigned long off, start, end, ubuf;
9111 size_t size;
9112 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07009113
Pavel Begunkov62248432021-04-28 13:11:29 +01009114 if (!iov->iov_base) {
9115 *pimu = ctx->dummy_ubuf;
9116 return 0;
9117 }
9118
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009119 ubuf = (unsigned long) iov->iov_base;
9120 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
9121 start = ubuf >> PAGE_SHIFT;
9122 nr_pages = end - start;
9123
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009124 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009125 ret = -ENOMEM;
9126
9127 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
9128 if (!pages)
9129 goto done;
9130
9131 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
9132 GFP_KERNEL);
9133 if (!vmas)
9134 goto done;
9135
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009136 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01009137 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009138 goto done;
9139
9140 ret = 0;
9141 mmap_read_lock(current->mm);
9142 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
9143 pages, vmas);
9144 if (pret == nr_pages) {
9145		/* don't support file-backed memory */
9146 for (i = 0; i < nr_pages; i++) {
9147 struct vm_area_struct *vma = vmas[i];
9148
Pavel Begunkov40dad762021-06-09 15:26:54 +01009149 if (vma_is_shmem(vma))
9150 continue;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009151 if (vma->vm_file &&
9152 !is_file_hugepages(vma->vm_file)) {
9153 ret = -EOPNOTSUPP;
9154 break;
9155 }
9156 }
9157 } else {
9158 ret = pret < 0 ? pret : -EFAULT;
9159 }
9160 mmap_read_unlock(current->mm);
9161 if (ret) {
9162 /*
9163		 * If we did a partial map, or found file-backed vmas,
9164		 * release any pages we did get.
9165 */
9166 if (pret > 0)
9167 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009168 goto done;
9169 }
9170
9171 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9172 if (ret) {
9173 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009174 goto done;
9175 }
9176
9177 off = ubuf & ~PAGE_MASK;
9178 size = iov->iov_len;
9179 for (i = 0; i < nr_pages; i++) {
9180 size_t vec_len;
9181
9182 vec_len = min_t(size_t, size, PAGE_SIZE - off);
9183 imu->bvec[i].bv_page = pages[i];
9184 imu->bvec[i].bv_len = vec_len;
9185 imu->bvec[i].bv_offset = off;
9186 off = 0;
9187 size -= vec_len;
9188 }
9189 /* store original address for later verification */
9190 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01009191 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009192 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009193 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009194 ret = 0;
9195done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009196 if (ret)
9197 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009198 kvfree(pages);
9199 kvfree(vmas);
9200 return ret;
9201}
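/*
 * The bvec fill loop above splits one user buffer across its pinned
 * pages: only the first vector carries the sub-page offset, every later
 * one starts at offset 0. A compact sketch of that arithmetic
 * (illustrative names):
 */
#if 0
static void demo_fill_bvecs(struct bio_vec *bv, struct page **pages,
			    int nr_pages, unsigned long ubuf, size_t len)
{
	unsigned long off = ubuf & ~PAGE_MASK;	/* offset in first page */
	int i;

	for (i = 0; i < nr_pages; i++) {
		size_t vec_len = min_t(size_t, len, PAGE_SIZE - off);

		bv[i].bv_page = pages[i];
		bv[i].bv_len = vec_len;
		bv[i].bv_offset = off;
		off = 0;			/* later pages start at 0 */
		len -= vec_len;
	}
}
#endif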
9202
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009203static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009204{
Pavel Begunkov87094462021-04-11 01:46:36 +01009205 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
9206 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009207}
9208
9209static int io_buffer_validate(struct iovec *iov)
9210{
Pavel Begunkov50e96982021-03-24 22:59:01 +00009211 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
9212
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009213 /*
9214 * Don't impose further limits on the size and buffer
9215	 * constraints here; we'll -EINVAL later when IO is
9216 * submitted if they are wrong.
9217 */
Pavel Begunkov62248432021-04-28 13:11:29 +01009218 if (!iov->iov_base)
9219 return iov->iov_len ? -EFAULT : 0;
9220 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009221 return -EFAULT;
9222
9223 /* arbitrary limit, but we need something */
9224 if (iov->iov_len > SZ_1G)
9225 return -EFAULT;
9226
Pavel Begunkov50e96982021-03-24 22:59:01 +00009227 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
9228 return -EOVERFLOW;
9229
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009230 return 0;
9231}
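/*
 * The -EOVERFLOW check above guards the address arithmetic itself:
 * iov_base plus the page-rounded length must not wrap. A portable sketch
 * of the same idea with a compiler builtin (hypothetical helper, not
 * kernel API):
 */
#if 0
static bool demo_range_ok(unsigned long base, unsigned long len)
{
	unsigned long end;

	/* true only if base + len does not wrap the address space */
	return !__builtin_add_overflow(base, len, &end);
}
#endif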
9232
9233static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009234 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009235{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009236 struct page *last_hpage = NULL;
9237 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009238 int i, ret;
9239 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009240
Pavel Begunkov87094462021-04-11 01:46:36 +01009241 if (ctx->user_bufs)
9242 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01009243 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01009244 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009245 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009246 if (ret)
9247 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01009248 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9249 if (ret)
9250 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009251 ret = io_buffers_map_alloc(ctx, nr_args);
9252 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08009253 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009254 return ret;
9255 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009256
Pavel Begunkov87094462021-04-11 01:46:36 +01009257 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07009258 ret = io_copy_iov(ctx, &iov, arg, i);
9259 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009260 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009261 ret = io_buffer_validate(&iov);
9262 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009263 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01009264 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01009265 ret = -EINVAL;
9266 break;
9267 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009268
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009269 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9270 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009271 if (ret)
9272 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009273 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009274
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009275 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009276
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009277 ctx->buf_data = data;
9278 if (ret)
9279 __io_sqe_buffers_unregister(ctx);
9280 else
9281 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07009282 return ret;
9283}
9284
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = NULL;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, offset) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

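/*
 * Example (illustrative userspace sketch, not part of this file): replacing
 * one slot of a registered buffer table, which is handled by
 * __io_sqe_buffers_update() above.  For IORING_REGISTER_BUFFERS_UPDATE the
 * nr_args argument carries the size of struct io_uring_rsrc_update2 rather
 * than a count; shown #if 0 for illustration.
 */
#if 0
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

static int update_buffer_slot(int ring_fd, unsigned slot, struct iovec *iov)
{
	struct io_uring_rsrc_update2 up;

	memset(&up, 0, sizeof(up));	/* resv fields must be zero */
	up.offset = slot;		/* first table index to replace */
	up.data = (unsigned long)iov;	/* array of replacement iovecs */
	up.nr = 1;			/* number of slots to update */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS_UPDATE, &up, sizeof(up));
}
#endif
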
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);

		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}

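/*
 * Example (illustrative userspace sketch, not part of this file): tying an
 * eventfd to the CQ ring via io_eventfd_register() above, so CQE arrival
 * can be waited on with poll/epoll alongside other fds.  Shown #if 0.
 */
#if 0
#include <linux/io_uring.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_cq_eventfd(int ring_fd)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	/* The kernel signals efd whenever new CQEs are posted. */
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
		close(efd);
		return -1;
	}
	return efd;
}
#endif
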
static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_buffers, index, buf)
		__io_remove_buffers(ctx, buf, index, -1U);
}

static void io_req_cache_free(struct list_head *list)
{
	struct io_kiocb *req, *nxt;

	list_for_each_entry_safe(req, nxt, list, inflight_entry) {
		list_del(&req->inflight_entry);
		kmem_cache_free(req_cachep, req);
	}
}

static void io_req_caches_free(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	mutex_lock(&ctx->uring_lock);

	if (state->free_reqs) {
		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
		state->free_reqs = 0;
	}

	io_flush_cached_locked_reqs(ctx, state);
	io_req_cache_free(&state->free_list);
	mutex_unlock(&ctx->uring_lock);
}

static void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !atomic_dec_and_test(&data->refs))
		wait_for_completion(&data->done);
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_sq_thread_finish(ctx);

	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
	io_wait_rsrc_data(ctx->buf_data);
	io_wait_rsrc_data(ctx->file_data);

	mutex_lock(&ctx->uring_lock);
	if (ctx->buf_data)
		__io_sqe_buffers_unregister(ctx);
	if (ctx->file_data)
		__io_sqe_files_unregister(ctx);
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true);
	mutex_unlock(&ctx->uring_lock);
	io_eventfd_unregister(ctx);
	io_destroy_buffers(ctx);
	if (ctx->sq_creds)
		put_cred(ctx->sq_creds);

	/* there are no registered resources left, nobody uses it */
	if (ctx->rsrc_node)
		io_rsrc_node_destroy(ctx->rsrc_node);
	if (ctx->rsrc_backup_node)
		io_rsrc_node_destroy(ctx->rsrc_backup_node);
	flush_delayed_work(&ctx->rsrc_put_work);

	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
	WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif
	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));

	if (ctx->mm_account) {
		mmdrop(ctx->mm_account);
		ctx->mm_account = NULL;
	}

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
	io_req_caches_free(ctx);
	if (ctx->hash_map)
		io_wq_put_hash(ctx->hash_map);
	kfree(ctx->cancel_hash);
	kfree(ctx->dummy_ubuf);
	kfree(ctx);
}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->poll_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (!io_sqring_full(ctx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	/*
	 * Don't flush the cqring overflow list here, just do a simple check.
	 * Otherwise there could possibly be an ABBA deadlock:
	 *      CPU0                    CPU1
	 *      ----                    ----
	 * lock(&ctx->uring_lock);
	 *                              lock(&ep->mtx);
	 *                              lock(&ctx->uring_lock);
	 * lock(&ep->mtx);
	 *
	 * Users may get EPOLLIN while seeing nothing in the cqring, which
	 * pushes them to do the flush.
	 */
	if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

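/*
 * Example (illustrative userspace sketch, not part of this file): the ring
 * fd itself is pollable through io_uring_poll() above -- POLLIN for pending
 * CQEs, POLLOUT for SQ ring space.  Per the overflow comment above, POLLIN
 * may be reported while the CQ ring looks empty, prompting an enter to
 * flush the overflow list.  Shown #if 0.
 */
#if 0
#include <poll.h>

static int wait_for_cqes(int ring_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = ring_fd,
		.events = POLLIN,	/* completions available */
	};

	return poll(&pfd, 1, timeout_ms);
}
#endif
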
static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *creds;

	creds = xa_erase(&ctx->personalities, id);
	if (creds) {
		put_cred(creds);
		return 0;
	}

	return -EINVAL;
}

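/*
 * Example (illustrative userspace sketch, not part of this file): the
 * personality ids handed out by IORING_REGISTER_PERSONALITY are dropped
 * through io_unregister_personality() above; for unregister the id travels
 * in the nr_args slot.  Shown #if 0.
 */
#if 0
#include <linux/io_uring.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

static int snapshot_creds(int ring_fd)
{
	/* Returns an id usable in sqe->personality, or -1 on error. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}

static int drop_creds(int ring_fd, unsigned id)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_UNREGISTER_PERSONALITY, NULL, id);
}
#endif
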
struct io_tctx_exit {
	struct callback_head task_work;
	struct completion completion;
	struct io_ring_ctx *ctx;
};

static void io_tctx_exit_cb(struct callback_head *cb)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_exit *work;

	work = container_of(cb, struct io_tctx_exit, task_work);
	/*
	 * When @in_idle, we're in cancellation and it's racy to remove the
	 * node. It'll be removed by the end of cancellation, just ignore it.
	 * tctx can be NULL if the queueing of this task_work raced with
	 * work cancelation off the exec path.
	 */
	if (tctx && !atomic_read(&tctx->in_idle))
		io_uring_del_tctx_node((unsigned long)work->ctx);
	complete(&work->completion);
}

static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->ctx == data;
}

static void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
	unsigned long timeout = jiffies + HZ * 60 * 5;
	unsigned long interval = HZ / 20;
	struct io_tctx_exit exit;
	struct io_tctx_node *node;
	int ret;

	/*
	 * If we're doing polled IO and end up having requests being
	 * submitted async (out-of-line), then completions can come in while
	 * we're waiting for refs to drop. We need to reap these manually,
	 * as nobody else will be looking for them.
	 */
	do {
		io_uring_try_cancel_requests(ctx, NULL, true);
		if (ctx->sq_data) {
			struct io_sq_data *sqd = ctx->sq_data;
			struct task_struct *tsk;

			io_sq_thread_park(sqd);
			tsk = sqd->thread;
			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
				io_wq_cancel_cb(tsk->io_uring->io_wq,
						io_cancel_ctx_cb, ctx, true);
			io_sq_thread_unpark(sqd);
		}

		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
			/* there is little hope left, don't run it too often */
			interval = HZ * 60;
		}
	} while (!wait_for_completion_timeout(&ctx->ref_comp, interval));

	init_completion(&exit.completion);
	init_task_work(&exit.task_work, io_tctx_exit_cb);
	exit.ctx = ctx;
	/*
	 * Some may use the context even when all refs and requests have been
	 * put, and they are free to do so while still holding uring_lock or
	 * completion_lock, see io_req_task_submit(). Apart from other work,
	 * this lock/unlock section also waits for them to finish.
	 */
	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->tctx_list)) {
		WARN_ON_ONCE(time_after(jiffies, timeout));

		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
					ctx_node);
		/* don't spin on a single task if cancellation failed */
		list_rotate_left(&ctx->tctx_list);
		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
		if (WARN_ON_ONCE(ret))
			continue;
		wake_up_process(node->task);

		mutex_unlock(&ctx->uring_lock);
		wait_for_completion(&exit.completion);
		mutex_lock(&ctx->uring_lock);
	}
	mutex_unlock(&ctx->uring_lock);
	spin_lock(&ctx->completion_lock);
	spin_unlock(&ctx->completion_lock);

	io_ring_ctx_free(ctx);
}

/* Returns true if we found and killed one or more timeouts */
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_kiocb *req, *tmp;
	int canceled = 0;

	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		if (io_match_task(req, tsk, cancel_all)) {
			io_kill_timeout(req, -ECANCELED);
			canceled++;
		}
	}
	spin_unlock_irq(&ctx->timeout_lock);
	if (canceled != 0)
		io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	if (canceled != 0)
		io_cqring_ev_posted(ctx);
	return canceled != 0;
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	unsigned long index;
	struct creds *creds;

	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true);
	xa_for_each(&ctx->personalities, index, creds)
		io_unregister_personality(ctx, index);
	mutex_unlock(&ctx->uring_lock);

	io_kill_timeouts(ctx, NULL, true);
	io_poll_remove_all(ctx, NULL, true);

	/* if we failed setting up the ctx, we might not have any rings */
	io_iopoll_try_reap_events(ctx);

	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
	/*
	 * Use system_unbound_wq to avoid spawning tons of event kworkers
	 * if we're exiting a ton of rings at the same time. It just adds
	 * noise and overhead, there's no discernible change in runtime
	 * over using system_wq.
	 */
	queue_work(system_unbound_wq, &ctx->exit_work);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}

struct io_task_cancel {
	struct task_struct *task;
	bool all;
};

static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_task_cancel *cancel = data;

	return io_match_task_safe(req, cancel->task, cancel->all);
}

static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
				  struct task_struct *task, bool cancel_all)
{
	struct io_defer_entry *de;
	LIST_HEAD(list);

	spin_lock(&ctx->completion_lock);
	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
		if (io_match_task_safe(de->req, task, cancel_all)) {
			list_cut_position(&list, &ctx->defer_list, &de->list);
			break;
		}
	}
	spin_unlock(&ctx->completion_lock);
	if (list_empty(&list))
		return false;

	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct io_defer_entry, list);
		list_del_init(&de->list);
		io_req_complete_failed(de->req, -ECANCELED);
		kfree(de);
	}
	return true;
}

static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
{
	struct io_tctx_node *node;
	enum io_wq_cancel cret;
	bool ret = false;

	mutex_lock(&ctx->uring_lock);
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		/*
		 * io_wq will stay alive while we hold uring_lock, because it's
		 * killed after ctx nodes, which requires taking the lock.
		 */
		if (!tctx || !tctx->io_wq)
			continue;
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
	}
	mutex_unlock(&ctx->uring_lock);

	return ret;
}

static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all)
{
	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
	struct io_uring_task *tctx = task ? task->io_uring : NULL;

	while (1) {
		enum io_wq_cancel cret;
		bool ret = false;

		if (!task) {
			ret |= io_uring_try_cancel_iowq(ctx);
		} else if (tctx && tctx->io_wq) {
			/*
			 * Cancels requests of all rings, not only @ctx, but
			 * it's fine as the task is in exit/exec.
			 */
			cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
					       &cancel, true);
			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
		}

		/* SQPOLL thread does its own polling */
		if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
		    (ctx->sq_data && ctx->sq_data->thread == current)) {
			while (!list_empty_careful(&ctx->iopoll_list)) {
				io_iopoll_try_reap_events(ctx);
				ret = true;
			}
		}

		ret |= io_cancel_defer_files(ctx, task, cancel_all);
		ret |= io_poll_remove_all(ctx, task, cancel_all);
		ret |= io_kill_timeouts(ctx, task, cancel_all);
		if (task)
			ret |= io_run_task_work();
		if (!ret)
			break;
		cond_resched();
	}
}

static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	tctx->last = ctx;
	return 0;
}

/*
 * Note that this task has used io_uring. We use it for cancelation purposes.
 */
static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (likely(tctx && tctx->last == ctx))
		return 0;
	return __io_uring_add_tctx_node(ctx);
}

/*
 * Remove this io_uring_file -> task mapping.
 */
static void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

static void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
	if (tracked)
		return atomic_read(&tctx->inflight_tracked);
	return percpu_counter_sum(&tctx->inflight);
}

/*
 * Find any io_uring ctx that this task has registered or done IO on, and cancel
 * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
 */
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_ring_ctx *ctx;
	s64 inflight;
	DEFINE_WAIT(wait);

	WARN_ON_ONCE(sqd && sqd->thread != current);

	if (!current->io_uring)
		return;
	if (tctx->io_wq)
		io_wq_exit_start(tctx->io_wq);

	atomic_inc(&tctx->in_idle);
	do {
		io_uring_drop_tctx_refs(current);
		/* read completions before cancelations */
		inflight = tctx_inflight(tctx, !cancel_all);
		if (!inflight)
			break;

		if (!sqd) {
			struct io_tctx_node *node;
			unsigned long index;

			xa_for_each(&tctx->xa, index, node) {
				/* sqpoll task will cancel all its requests */
				if (node->ctx->sq_data)
					continue;
				io_uring_try_cancel_requests(node->ctx, current,
							     cancel_all);
			}
		} else {
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				io_uring_try_cancel_requests(ctx, current,
							     cancel_all);
		}

		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
		io_run_task_work();
		io_uring_drop_tctx_refs(current);

		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx, !cancel_all))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);

	io_uring_clean_tctx(tctx);
	if (cancel_all) {
		/*
		 * We shouldn't run task_works after cancel, so just leave
		 * ->in_idle set for normal exit.
		 */
		atomic_dec(&tctx->in_idle);
		/* for exec all current's requests should be gone, kill tctx */
		__io_uring_free(current);
	}
}

void __io_uring_cancel(bool cancel_all)
{
	io_uring_cancel_generic(cancel_all, NULL);
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */

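/*
 * Example (illustrative userspace sketch, not part of this file): the three
 * mmap offsets accepted by io_uring_validate_mmap_request() above.  With
 * IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share one mapping; this
 * sketch maps them separately for clarity and skips error checks.  #if 0.
 */
#if 0
#include <linux/io_uring.h>
#include <sys/mman.h>

static void *map_rings(int ring_fd, struct io_uring_params *p,
		       void **cq_ring, void **sqes)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	void *sq_ring;

	sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	*cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		     ring_fd, IORING_OFF_SQES);
	return sq_ring;
}
#endif
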
static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
	return 0;
}

static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
			  struct __kernel_timespec __user **ts,
			  const sigset_t __user **sig)
{
	struct io_uring_getevents_arg arg;

	/*
	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
	 * is just a pointer to the sigset_t.
	 */
	if (!(flags & IORING_ENTER_EXT_ARG)) {
		*sig = (const sigset_t __user *) argp;
		*ts = NULL;
		return 0;
	}

	/*
	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
	 * timespec and sigset_t pointers if good.
	 */
	if (*argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;
	if (arg.pad)
		return -EINVAL;
	*sig = u64_to_user_ptr(arg.sigmask);
	*argsz = arg.sigmask_sz;
	*ts = u64_to_user_ptr(arg.ts);
	return 0;
}

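/*
 * Example (illustrative userspace sketch, not part of this file): what
 * io_get_ext_arg() above expects when IORING_ENTER_EXT_ARG is set -- argp
 * points at a struct io_uring_getevents_arg and argsz is its size, letting
 * a wait carry both a sigmask and a timeout.  Shown #if 0.
 */
#if 0
#include <linux/io_uring.h>
#include <linux/time_types.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_cqes_timeout(int ring_fd, unsigned want, long timeout_ns)
{
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = timeout_ns };
	struct io_uring_getevents_arg arg;

	memset(&arg, 0, sizeof(arg));	/* arg.pad must be zero */
	arg.ts = (unsigned long)&ts;	/* arg.sigmask left NULL */
	return syscall(__NR_io_uring_enter, ring_fd, 0, want,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}
#endif
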
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const void __user *, argp,
		size_t, argsz)
{
	struct io_ring_ctx *ctx;
	int submitted = 0;
	struct fd f;
	long ret;

	io_run_task_work();

	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (unlikely(f.file->f_op != &io_uring_fops))
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (unlikely(!percpu_ref_tryget(&ctx->refs)))
		goto out_fput;

	ret = -EBADFD;
	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		io_cqring_overflow_flush(ctx);

		if (unlikely(ctx->sq_data->thread == NULL)) {
			ret = -EOWNERDEAD;
			goto out;
		}
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT) {
			ret = io_sqpoll_wait_sq(ctx);
			if (ret)
				goto out;
		}
		submitted = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_tctx_node(ctx);
		if (unlikely(ret))
			goto out;
		mutex_lock(&ctx->uring_lock);
		submitted = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		const sigset_t __user *sig;
		struct __kernel_timespec __user *ts;

		ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
		if (unlikely(ret))
			goto out;

		min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
		 * space applications don't need to do io completion events
		 * polling again, they can rely on io_sq_thread to do polling
		 * work, which can reduce cpu usage and uring_lock contention.
		 */
		if (ctx->flags & IORING_SETUP_IOPOLL &&
		    !(ctx->flags & IORING_SETUP_SQPOLL)) {
			ret = io_iopoll_check(ctx, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}

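/*
 * Example (illustrative userspace sketch, not part of this file): the
 * common non-SQPOLL call into the syscall above -- submit what is queued
 * in the SQ ring and block for at least one completion in a single trip.
 * Shown #if 0.
 */
#if 0
#include <linux/io_uring.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

static int submit_and_wait(int ring_fd, unsigned to_submit)
{
	/*
	 * Returns the number of SQEs consumed.  With IORING_SETUP_SQPOLL
	 * this call is only needed for waking or waiting, since the kernel
	 * thread submits on its own.
	 */
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
		       IORING_ENTER_GETEVENTS, NULL, 0);
}
#endif
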
#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(struct seq_file *m, unsigned int id,
			      const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	struct io_sq_data *sq = NULL;
	bool has_lock;
	int i;

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo case grabs it in the opposite direction of normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		sq = ctx->sq_data;
		if (!sq->thread)
			sq = NULL;
	}

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = io_file_from_index(ctx, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
		unsigned int len = buf->ubuf_end - buf->ubuf;

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}
	seq_printf(m, "PollList:\n");
	spin_lock(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list = &ctx->cancel_hash[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					req->task->task_works != NULL);
	}
	spin_unlock(&ctx->completion_lock);
	if (has_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
{
	int ret, fd;

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;

	ret = io_uring_add_tctx_node(ctx);
	if (ret) {
		put_unused_fd(fd);
		return ret;
	}
	fd_install(fd, file);
	return fd;
}

/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
	struct file *file;
#if defined(CONFIG_UNIX)
	int ret;

	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ERR_PTR(ret);
#endif

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
#if defined(CONFIG_UNIX)
	if (IS_ERR(file)) {
		sock_release(ctx->ring_sock);
		ctx->ring_sock = NULL;
	} else {
		ctx->ring_sock->file = file;
	}
#endif
	return file;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;
	ctx->compat = in_compat_syscall();
	if (!capable(CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;
	/* always set a rsrc node */
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		goto err;
	io_rsrc_node_switch(ctx, NULL);

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
		/* fput will clean it up */
		fput(file);
		return ret;
	}

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

10520/*
10521 * Sets up an aio uring context, and returns the fd. Applications asks for a
10522 * ring size, we return the actual sq/cq ring sizes (among other things) in the
10523 * params structure passed in.
10524 */
10525static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
10526{
10527 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010528 int i;
10529
10530 if (copy_from_user(&p, params, sizeof(p)))
10531 return -EFAULT;
10532 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
10533 if (p.resv[i])
10534 return -EINVAL;
10535 }
10536
Jens Axboe6c271ce2019-01-10 11:22:30 -070010537 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -070010538 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010539 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
10540 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -070010541 return -EINVAL;
10542
Xiaoguang Wang7f136572020-05-05 16:28:53 +080010543 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010544}
10545
10546SYSCALL_DEFINE2(io_uring_setup, u32, entries,
10547 struct io_uring_params __user *, params)
10548{
10549 return io_uring_setup(entries, params);
10550}
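
/*
 * Usage sketch (userspace, illustrative only; assumes the uapi header
 * <linux/io_uring.h> and a raw syscall(2) wrapper, as in the liburing
 * examples): create a ring, then map the SQ ring using the sq_off offsets
 * that io_uring_create() filled in above.
 *
 *	struct io_uring_params p;
 *	void *sq_ring;
 *	size_t sq_sz;
 *	int fd;
 *
 *	memset(&p, 0, sizeof(p));
 *	fd = syscall(__NR_io_uring_setup, 256, &p);
 *	if (fd < 0)
 *		return -1;
 *	sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 */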

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
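
/*
 * Usage sketch (userspace, illustrative only; ring_fd stands in for an
 * io_uring fd): query which opcodes this kernel supports. The probe buffer
 * must be zero-filled going in, since io_probe() rejects non-zero input.
 *
 *	struct io_uring_probe *probe;
 *	size_t len = sizeof(*probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *
 *	probe = calloc(1, len);
 *	if (!syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		     probe, IORING_OP_LAST) &&
 *	    (probe->ops[IORING_OP_OPENAT].flags & IO_URING_OP_SUPPORTED))
 *		openat_supported = 1;
 */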

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
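
/*
 * Usage sketch (userspace, illustrative only; sqe stands in for a prepared
 * submission entry): snapshot the calling task's credentials, then tag a
 * later SQE so that request is issued under them. The returned id fits the
 * u16 sqe->personality field, matching the USHRT_MAX limit above.
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *
 *	if (id >= 0)
 *		sqe->personality = id;
 */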

static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}
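
/*
 * Usage sketch (userspace, illustrative only): lock down a ring created
 * with IORING_SETUP_R_DISABLED so it can only submit readv/writev, then
 * enable it. As enforced above, restrictions must be registered while the
 * ring is still disabled, and only once.
 *
 *	struct io_uring_restriction res[2];
 *
 *	memset(res, 0, sizeof(res));
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[1].sqe_op = IORING_OP_WRITEV;
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */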

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
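
/*
 * Usage sketch (userspace, illustrative only; new_fd stands in for any
 * open descriptor): swap slot 3 of a previously registered file table.
 * An fd of -1 in the array removes the file at that slot.
 *
 *	struct io_uring_rsrc_update up;
 *	__s32 fds[1] = { new_fd };
 *
 *	memset(&up, 0, sizeof(up));
 *	up.offset = 3;
 *	up.data = (unsigned long)fds;
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, 1);
 */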

static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv || rr.resv2)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}
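
/*
 * Usage sketch (userspace, illustrative only; fd_a/fd_b stand in for open
 * descriptors): register a tagged file table through the v2 interface.
 * Note that nr_args carries the structure size here, which is what keeps
 * the ABI extendible, and that a zero tag means "no tag" for that slot.
 *
 *	struct io_uring_rsrc_register rr;
 *	__s32 fds[2] = { fd_a, fd_b };
 *	__u64 tags[2] = { 1, 2 };
 *
 *	memset(&rr, 0, sizeof(rr));
 *	rr.nr = 2;
 *	rr.data = (unsigned long)fds;
 *	rr.tags = (unsigned long)tags;
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 */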

static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
				unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}

static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}
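
/*
 * Usage sketch (userspace, illustrative only): pin the calling task's
 * io-wq workers to CPUs 0 and 1, then later lift the restriction again.
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_UNREGISTER_IOWQ_AFF,
 *		NULL, 0);
 */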

static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. It's fine to drop uring_lock here, since
			 * we hold a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	ret = -EINVAL;
	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
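
/*
 * Usage sketch (userspace, illustrative only): cap bounded (e.g. disk)
 * workers at 4 and unbounded (e.g. network) workers at 16. The kernel
 * writes the previous limits back into the same array, so an all-zero
 * array simply queries the current values.
 *
 *	__u32 counts[2] = { 4, 16 };
 *
 *	if (!syscall(__NR_io_uring_register, ring_fd,
 *		     IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2))
 *		printf("old limits: bounded %u, unbounded %u\n",
 *		       counts[0], counts[1]);
 */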

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_REGISTER_BUFFERS:
	case IORING_UNREGISTER_BUFFERS:
	case IORING_REGISTER_FILES:
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
	case IORING_REGISTER_FILES2:
	case IORING_REGISTER_FILES_UPDATE2:
	case IORING_REGISTER_BUFFERS2:
	case IORING_REGISTER_BUFFERS_UPDATE:
	case IORING_REGISTER_IOWQ_AFF:
	case IORING_UNREGISTER_IOWQ_AFF:
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		return false;
	default:
		return true;
	}
}

static int io_ctx_quiesce(struct io_ring_ctx *ctx)
{
	long ret;

	percpu_ref_kill(&ctx->refs);

	/*
	 * Drop the uring mutex before waiting for references to exit. If
	 * another thread is currently inside io_uring_enter() it might need
	 * to grab the uring_lock to make progress. If we hold it here across
	 * the drain wait, then we can deadlock. It's safe to drop the mutex
	 * here, since no new references will come in after we've killed the
	 * percpu ref.
	 */
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = wait_for_completion_interruptible(&ctx->ref_comp);
		if (!ret)
			break;
		ret = io_run_task_work_sig();
	} while (ret >= 0);
	mutex_lock(&ctx->uring_lock);

	if (ret)
		io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
	return ret;
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	if (io_register_op_must_quiesce(opcode)) {
		ret = io_ctx_quiesce(ctx);
		if (ret)
			return ret;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
				ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
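
/*
 * Usage sketch (userspace, illustrative only): register one fixed buffer
 * so IORING_OP_READ_FIXED and IORING_OP_WRITE_FIXED requests can reuse the
 * pinned pages instead of mapping them per I/O. Registration is best done
 * once at startup, outside the I/O fast path.
 *
 *	struct iovec iov;
 *
 *	iov.iov_base = aligned_alloc(4096, 1 << 16);
 *	iov.iov_len = 1 << 16;
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 */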

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
}
__initcall(io_uring_init);