// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
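
/*
 * As an illustration of the ordering rules above, a minimal userspace
 * submission sketch. This is not kernel code: sq_tail, sq_mask, sq_flags,
 * sq_array and ring_fd stand in for the application's mmap'd ring fields,
 * and the barrier macros for their userspace equivalents (see liburing
 * for a complete implementation):
 *
 *	unsigned tail = *sq_tail;
 *	sq_array[tail & *sq_mask] = sqe_index;
 *	smp_store_release(sq_tail, tail + 1);
 *	smp_mb();
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */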
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

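/*
 * The matching CQ-side consumption sketch, again illustrative userspace
 * code rather than kernel code: cq_head, cq_tail, cq_mask and cqes stand
 * in for the application's view of the fields above, and
 * handle_completion() is a placeholder for application logic:
 *
 *	unsigned head = *cq_head;
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *		handle_completion(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 */
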
enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
	unsigned int		compl_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;

	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array (a sketch of the resulting SQE lookup follows this
		 * struct definition).
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		spinlock_t		timeout_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket		*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash		*hash_map;

		/* Only used for accounting purposes */
		struct user_struct	*user;
		struct mm_struct	*mm_account;

		/* ctx exit and cancelation */
		struct llist_head		fallback_llist;
		struct delayed_work		fallback_work;
		struct work_struct		exit_work;
		struct list_head		tctx_list;
		struct completion		ref_comp;
		u32				iowq_limits[2];
		bool				iowq_limits_set;
	};
};

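/*
 * A simplified sketch of the SQE lookup implied by the sq_array
 * indirection documented in struct io_ring_ctx above. This mirrors what
 * io_get_sqe() does, minus the bounds check on the fetched index:
 *
 *	unsigned mask = ctx->sq_entries - 1;
 *	unsigned head = READ_ONCE(ctx->rings->sq_array[ctx->cached_sq_head++ & mask]);
 *	struct io_uring_sqe *sqe = &ctx->sq_sqes[head];
 */
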
struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	struct callback_head	task_work;
	bool			task_running;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	int				retries;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
	u32				flags;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	size_t				done_io;
	struct io_buffer		*kbuf;
	void __user			*msg_control;
};

struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	int				splice_fd_in;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_mkdir {
	struct file			*file;
	int				dfd;
	umode_t				mode;
	struct filename			*filename;
};

struct io_symlink {
	struct file			*file;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
};

struct io_hardlink {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_completion {
	struct file			*file;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	struct iov_iter_state		iter_state;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_PARTIAL_IO_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		struct io_mkdir		mkdir;
		struct io_symlink	symlink;
		struct io_hardlink	hardlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
	/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
	struct io_buffer		*kbuf;
	atomic_t			poll_refs;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
	[IORING_OP_MKDIRAT] = {},
	[IORING_OP_SYMLINKAT] = {},
	[IORING_OP_LINKAT] = {},
};

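/*
 * Illustrative (not verbatim) use of the table above; the request setup
 * and issue paths index it by opcode to decide how a request is treated:
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file && !req->file)
 *		return -EBADF;
 *	if (def->needs_async_setup)
 *		ret = io_req_prep_async(req);
 */
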
Pavel Begunkov0756a862021-08-15 10:40:25 +01001079/* requests with any of those set should undergo io_disarm_next() */
1080#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
1081
Pavel Begunkov7a612352021-03-09 00:37:59 +00001082static bool io_disarm_next(struct io_kiocb *req);
Pavel Begunkoveef51da2021-06-14 02:36:15 +01001083static void io_uring_del_tctx_node(unsigned long index);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00001084static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1085 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001086 bool cancel_all);
Pavel Begunkov78cc6872021-06-14 02:36:23 +01001087static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00001088
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001089static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1090
Jackie Liuec9c02a2019-11-08 23:50:36 +08001091static void io_put_req(struct io_kiocb *req);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001092static void io_put_req_deferred(struct io_kiocb *req);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001093static void io_dismantle_req(struct io_kiocb *req);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001094static void io_queue_linked_timeout(struct io_kiocb *req);
Pavel Begunkovfdecb662021-04-25 14:32:20 +01001095static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01001096 struct io_uring_rsrc_update2 *up,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01001097 unsigned nr_args);
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001098static void io_clean_op(struct io_kiocb *req);
Pavel Begunkovac177052021-08-09 13:04:02 +01001099static struct file *io_file_get(struct io_ring_ctx *ctx,
Bing-Jhong Billy Jhengcf7f9cd2023-03-02 21:00:06 +08001100 struct io_kiocb *req, int fd, bool fixed,
1101 unsigned int issue_flags);
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00001102static void __io_queue_sqe(struct io_kiocb *req);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001103static void io_rsrc_put_work(struct work_struct *work);
Jens Axboede0617e2019-04-06 21:51:27 -06001104
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001105static void io_req_task_queue(struct io_kiocb *req);
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01001106static void io_submit_flush_completions(struct io_ring_ctx *ctx);
Pavel Begunkov179ae0d2021-02-28 22:35:20 +00001107static int io_req_prep_async(struct io_kiocb *req);
Jens Axboe9a56a232019-01-09 09:06:50 -07001108
Pavel Begunkovb9445592021-08-25 12:25:45 +01001109static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1110 unsigned int issue_flags, u32 slot_index);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01001111static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1112
Pavel Begunkovf1042b62021-08-28 19:54:39 -06001113static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
Pavel Begunkovb9445592021-08-25 12:25:45 +01001114
Jens Axboe2b188cc2019-01-07 10:46:33 -07001115static struct kmem_cache *req_cachep;
1116
Jens Axboe09186822020-10-13 15:01:40 -06001117static const struct file_operations io_uring_fops;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001118
1119struct sock *io_uring_get_socket(struct file *file)
1120{
1121#if defined(CONFIG_UNIX)
1122 if (file->f_op == &io_uring_fops) {
1123 struct io_ring_ctx *ctx = file->private_data;
1124
1125 return ctx->ring_sock->sk;
1126 }
1127#endif
1128 return NULL;
1129}
1130EXPORT_SYMBOL(io_uring_get_socket);
1131
Pavel Begunkovf237c302021-08-18 12:42:46 +01001132static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
1133{
1134 if (!*locked) {
1135 mutex_lock(&ctx->uring_lock);
1136 *locked = true;
1137 }
1138}
1139
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001140#define io_for_each_link(pos, head) \
1141 for (pos = (head); pos; pos = pos->link)
1142
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001143/*
1144 * Shamelessly stolen from the mm implementation of page reference checking,
1145 * see commit f958d7b528b1 for details.
1146 */
1147#define req_ref_zero_or_close_to_overflow(req) \
1148 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1149
1150static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1151{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001152 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001153 return atomic_inc_not_zero(&req->refs);
1154}
1155
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001156static inline bool req_ref_put_and_test(struct io_kiocb *req)
1157{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001158 if (likely(!(req->flags & REQ_F_REFCOUNT)))
1159 return true;
1160
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001161 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1162 return atomic_dec_and_test(&req->refs);
1163}
1164
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001165static inline void req_ref_get(struct io_kiocb *req)
1166{
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001167 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
Pavel Begunkov21c843d2021-08-11 19:28:27 +01001168 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1169 atomic_inc(&req->refs);
1170}
1171
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001172static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001173{
1174 if (!(req->flags & REQ_F_REFCOUNT)) {
1175 req->flags |= REQ_F_REFCOUNT;
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001176 atomic_set(&req->refs, nr);
Pavel Begunkov20e60a32021-08-11 19:28:30 +01001177 }
1178}
1179
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001180static inline void io_req_set_refcount(struct io_kiocb *req)
1181{
1182 __io_req_set_refcount(req, 1);
1183}
1184
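/*
 * Pin the ctx's current rsrc node so fixed files/buffers the request uses
 * aren't torn down by a concurrent rsrc update while the request is in
 * flight; the reference is dropped in io_dismantle_req().
 */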
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01001185static inline void io_req_set_rsrc_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001186{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001187 struct io_ring_ctx *ctx = req->ctx;
1188
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001189 if (!req->fixed_rsrc_refs) {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01001190 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001191 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001192 }
1193}
1194
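/*
 * Bring a dying percpu ref back to life: if it already hit zero, wait for
 * ->release() to finish before resurrecting; otherwise drop the extra
 * reference that the tryget took.
 */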
Pavel Begunkovf70865d2021-04-11 01:46:40 +01001195static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1196{
1197 bool got = percpu_ref_tryget(ref);
1198
1199 /* already at zero, wait for ->release() */
1200 if (!got)
1201 wait_for_completion(compl);
1202 percpu_ref_resurrect(ref);
1203 if (got)
1204 percpu_ref_put(ref);
1205}
1206
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001207static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1208 bool cancel_all)
Pavel Begunkov1c939a52021-11-26 14:38:15 +00001209 __must_hold(&req->ctx->timeout_lock)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001210{
1211 struct io_kiocb *req;
1212
Pavel Begunkov68207682021-03-22 01:58:25 +00001213 if (task && head->task != task)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001214 return false;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01001215 if (cancel_all)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001216 return true;
1217
1218 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001219 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001220 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001221 }
1222 return false;
1223}
1224
Pavel Begunkov1c939a52021-11-26 14:38:15 +00001225static bool io_match_linked(struct io_kiocb *head)
1226{
1227 struct io_kiocb *req;
1228
1229 io_for_each_link(req, head) {
1230 if (req->flags & REQ_F_INFLIGHT)
1231 return true;
1232 }
1233 return false;
1234}
1235
1236/*
1237 * Like io_match_task(), but protected against racing with linked timeouts.
1238 * The caller must not hold timeout_lock.
1239 */
1240static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1241 bool cancel_all)
1242{
1243 bool matched;
1244
1245 if (task && head->task != task)
1246 return false;
1247 if (cancel_all)
1248 return true;
1249
1250 if (head->flags & REQ_F_LINK_TIMEOUT) {
1251 struct io_ring_ctx *ctx = head->ctx;
1252
1253 /* protect against races with linked timeouts */
1254 spin_lock_irq(&ctx->timeout_lock);
1255 matched = io_match_linked(head);
1256 spin_unlock_irq(&ctx->timeout_lock);
1257 } else {
1258 matched = io_match_linked(head);
1259 }
1260 return matched;
1261}
1262
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001263static inline void req_set_fail(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001264{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001265 req->flags |= REQ_F_FAIL;
Jens Axboec40f6372020-06-25 15:39:59 -06001266}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001267
Hao Xua8295b92021-08-27 17:46:09 +08001268static inline void req_fail_link_node(struct io_kiocb *req, int res)
1269{
1270 req_set_fail(req);
1271 req->result = res;
1272}
1273
Jens Axboe2b188cc2019-01-07 10:46:33 -07001274static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1275{
1276 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1277
Jens Axboe0f158b42020-05-14 17:18:39 -06001278 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001279}
1280
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001281static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1282{
1283 return !req->timeout.off;
1284}
1285
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001286static void io_fallback_req_func(struct work_struct *work)
1287{
1288 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1289 fallback_work.work);
1290 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
1291 struct io_kiocb *req, *tmp;
Pavel Begunkovf237c302021-08-18 12:42:46 +01001292 bool locked = false;
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001293
1294 percpu_ref_get(&ctx->refs);
1295 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
Pavel Begunkovf237c302021-08-18 12:42:46 +01001296 req->io_task_work.func(req, &locked);
Pavel Begunkov5636c002021-08-18 12:42:45 +01001297
Pavel Begunkovf237c302021-08-18 12:42:46 +01001298 if (locked) {
1299 if (ctx->submit_state.compl_nr)
1300 io_submit_flush_completions(ctx);
1301 mutex_unlock(&ctx->uring_lock);
1302 }
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001303 percpu_ref_put(&ctx->refs);
Pavel Begunkovf237c302021-08-18 12:42:46 +01001304
Pavel Begunkovf56165e2021-08-09 20:18:07 +01001305}
1306
Jens Axboe2b188cc2019-01-07 10:46:33 -07001307static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1308{
1309 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001310 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001311
1312 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1313 if (!ctx)
1314 return NULL;
1315
Jens Axboe78076bb2019-12-04 19:56:40 -07001316 /*
1317	 * Use 5 bits less than the max cq entries, which should give us around
1318 * 32 entries per hash list if totally full and uniformly spread.
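	 * For example, cq_entries == 4096 gives ilog2() == 12 and hash_bits == 7,
	 * i.e. 128 buckets.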
1319 */
1320 hash_bits = ilog2(p->cq_entries);
1321 hash_bits -= 5;
1322 if (hash_bits <= 0)
1323 hash_bits = 1;
1324 ctx->cancel_hash_bits = hash_bits;
1325 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1326 GFP_KERNEL);
1327 if (!ctx->cancel_hash)
1328 goto err;
1329 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1330
Pavel Begunkov62248432021-04-28 13:11:29 +01001331 ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
1332 if (!ctx->dummy_ubuf)
1333 goto err;
1334	/* set an invalid range so that io_import_fixed() fails on it */
1335 ctx->dummy_ubuf->ubuf = -1UL;
1336
Roman Gushchin21482892019-05-07 10:01:48 -07001337 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001338 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1339 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001340
1341 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001342 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001343 INIT_LIST_HEAD(&ctx->sqd_list);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001344 init_waitqueue_head(&ctx->poll_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001345 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001346 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001347 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001348 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001349 mutex_init(&ctx->uring_lock);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001350 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001351 spin_lock_init(&ctx->completion_lock);
Jens Axboe89850fc2021-08-10 15:11:51 -06001352 spin_lock_init(&ctx->timeout_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001353 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001354 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001355 INIT_LIST_HEAD(&ctx->timeout_list);
Pavel Begunkovef9dd632021-08-28 19:54:38 -06001356 INIT_LIST_HEAD(&ctx->ltimeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001357 spin_lock_init(&ctx->rsrc_ref_lock);
1358 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001359 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1360 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001361 INIT_LIST_HEAD(&ctx->tctx_list);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001362 INIT_LIST_HEAD(&ctx->submit_state.free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001363 INIT_LIST_HEAD(&ctx->locked_free_list);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01001364 INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001365 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001366err:
Pavel Begunkov62248432021-04-28 13:11:29 +01001367 kfree(ctx->dummy_ubuf);
Jens Axboe78076bb2019-12-04 19:56:40 -07001368 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001369 kfree(ctx);
1370 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001371}
1372
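/*
 * Account a dropped CQE: bump the overflow counter userspace sees in the
 * CQ ring, and undo the cq_extra bookkeeping used by drain sequencing.
 */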
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001373static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1374{
1375 struct io_rings *r = ctx->rings;
1376
1377 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1378 ctx->cq_extra--;
1379}
1380
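/*
 * A drained request may only run once everything submitted before it has
 * completed: compare its recorded sequence (adjusted by cq_extra, which
 * counts CQEs that have no matching SQE) against the cached CQ tail.
 */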
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001381static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001382{
Jens Axboe2bc99302020-07-09 09:43:27 -06001383 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1384 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001385
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001386 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001387 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001388
Bob Liu9d858b22019-11-13 18:06:25 +08001389 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001390}
1391
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01001392#define FFS_ASYNC_READ 0x1UL
1393#define FFS_ASYNC_WRITE 0x2UL
1394#ifdef CONFIG_64BIT
1395#define FFS_ISREG 0x4UL
1396#else
1397#define FFS_ISREG 0x0UL
1398#endif
1399#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
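/*
 * The async-read/write and regular-file hints above are stashed in the
 * low bits of the fixed-file table entry's pointer; applying FFS_MASK
 * recovers the actual struct file pointer.
 */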
1400
1401static inline bool io_req_ffs_set(struct io_kiocb *req)
1402{
1403 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1404}
1405
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001406static void io_req_track_inflight(struct io_kiocb *req)
1407{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001408 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001409 req->flags |= REQ_F_INFLIGHT;
Jens Axboe3746d622022-06-23 11:06:43 -06001410 atomic_inc(&req->task->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001411 }
1412}
1413
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001414static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
1415{
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01001416 if (WARN_ON_ONCE(!req->link))
1417 return NULL;
1418
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001419 req->flags &= ~REQ_F_ARM_LTIMEOUT;
1420 req->flags |= REQ_F_LINK_TIMEOUT;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001421
1422 /* linked timeouts should have two refs once prep'ed */
Pavel Begunkov48dcd382021-08-15 10:40:18 +01001423 io_req_set_refcount(req);
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001424 __io_req_set_refcount(req->link, 2);
1425 return req->link;
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001426}
1427
1428static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
1429{
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01001430 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
Pavel Begunkovfd08e532021-08-11 19:28:31 +01001431 return NULL;
1432 return __io_prep_linked_timeout(req);
1433}
1434
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001435static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001436{
Jens Axboed3656342019-12-18 09:50:26 -07001437 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001438 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001439
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001440 if (!(req->flags & REQ_F_CREDS)) {
1441 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001442 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001443 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001444
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001445 req->work.list.next = NULL;
1446 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001447 if (req->flags & REQ_F_FORCE_ASYNC)
1448 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1449
Jens Axboed3656342019-12-18 09:50:26 -07001450 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001451 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001452 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001453 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001454 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001455 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001456 }
Jens Axboe561fb042019-10-24 07:25:42 -06001457}
1458
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001459static void io_prep_async_link(struct io_kiocb *req)
1460{
1461 struct io_kiocb *cur;
1462
Pavel Begunkov44eff402021-07-26 14:14:31 +01001463 if (req->flags & REQ_F_LINK_TIMEOUT) {
1464 struct io_ring_ctx *ctx = req->ctx;
1465
Pavel Begunkov09eb40f2021-11-23 01:45:35 +00001466 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001467 io_for_each_link(cur, req)
1468 io_prep_async_work(cur);
Pavel Begunkov09eb40f2021-11-23 01:45:35 +00001469 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov44eff402021-07-26 14:14:31 +01001470 } else {
1471 io_for_each_link(cur, req)
1472 io_prep_async_work(cur);
1473 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001474}
1475
Pavel Begunkovf237c302021-08-18 12:42:46 +01001476static void io_queue_async_work(struct io_kiocb *req, bool *locked)
Jens Axboe561fb042019-10-24 07:25:42 -06001477{
Jackie Liua197f662019-11-08 08:09:12 -07001478 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001479 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001480 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001481
Pavel Begunkovf237c302021-08-18 12:42:46 +01001482	/* must not take the lock here, NULL the pointer as a precaution */
1483 locked = NULL;
1484
Jens Axboe3bfe6102021-02-16 14:15:30 -07001485 BUG_ON(!tctx);
1486 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001487
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001488 /* init ->work of the whole link before punting */
1489 io_prep_async_link(req);
Jens Axboe991468d2021-07-23 11:53:54 -06001490
1491 /*
1492 * Not expected to happen, but if we do have a bug where this _can_
1493 * happen, catch it here and ensure the request is marked as
1494 * canceled. That will make io-wq go through the usual work cancel
1495 * procedure rather than attempt to run this request (or create a new
1496 * worker for it).
1497 */
1498 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1499 req->work.flags |= IO_WQ_WORK_CANCEL;
1500
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001501 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1502 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001503 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001504 if (link)
1505 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001506}
1507
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001508static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001509 __must_hold(&req->ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06001510 __must_hold(&req->ctx->timeout_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001511{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001512 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001513
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001514 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov2ae2eb92021-09-09 13:56:27 +01001515 if (status)
1516 req_set_fail(req);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001517 atomic_set(&req->ctx->cq_timeouts,
1518 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001519 list_del_init(&req->timeout.list);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001520 io_fill_cqe_req(req, status, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01001521 io_put_req_deferred(req);
Jens Axboe5262f562019-09-17 12:26:57 -06001522 }
1523}
1524
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001525static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001526{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001527 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001528 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1529 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001530
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001531 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001532 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001533 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001534 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001535 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001536 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001537}
1538
Pavel Begunkov360428f2020-05-30 14:54:17 +03001539static void io_flush_timeouts(struct io_ring_ctx *ctx)
Jens Axboe89850fc2021-08-10 15:11:51 -06001540 __must_hold(&ctx->completion_lock)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001541{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001542 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Jens Axboeba7261a2022-04-08 11:08:58 -06001543 struct io_kiocb *req, *tmp;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001544
Jens Axboe79ebeae2021-08-10 15:18:27 -06001545 spin_lock_irq(&ctx->timeout_lock);
Jens Axboeba7261a2022-04-08 11:08:58 -06001546 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001547 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001548
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001549 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001550 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001551
1552 /*
1553 * Since seq can easily wrap around over time, subtract
1554 * the last seq at which timeouts were flushed before comparing.
1555 * Assuming not more than 2^31-1 events have happened since,
1556 * these subtractions won't have wrapped, so we can check if
1557 * target is in [last_seq, current_seq] by comparing the two.
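		 * Example with wrap: last_flush == 0xfffffff0, target == 0x10,
		 * seq == 0x20 give events_needed == 0x20 and events_got == 0x30,
		 * so the timeout fires even though the raw sequence values
		 * compare "backwards".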
1558 */
1559 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1560 events_got = seq - ctx->cq_last_tm_flush;
1561 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001562 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001563
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001564 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001565 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001566 ctx->cq_last_tm_flush = seq;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001567 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001568}
1569
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001570static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001571{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001572 if (ctx->off_timeout_used)
1573 io_flush_timeouts(ctx);
1574 if (ctx->drain_active)
1575 io_queue_deferred(ctx);
1576}
1577
1578static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1579{
1580 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1581 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001582 /* order cqe stores with ring update */
1583 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001584}
1585
Jens Axboe90554202020-09-03 12:12:41 -06001586static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1587{
1588 struct io_rings *r = ctx->rings;
1589
Pavel Begunkova566c552021-05-16 22:58:08 +01001590 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001591}
1592
Pavel Begunkov888aae22021-01-19 13:32:39 +00001593static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1594{
1595 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1596}
1597
Pavel Begunkovd068b502021-05-16 22:58:11 +01001598static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001599{
Hristo Venev75b28af2019-08-26 17:23:46 +00001600 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001601 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001602
Stefan Bühler115e12e2019-04-24 23:54:18 +02001603 /*
1604 * writes to the cq entry need to come after reading head; the
1605 * control dependency is enough as we're using WRITE_ONCE to
1606 * fill the cq entry
1607 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001608 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001609 return NULL;
1610
Pavel Begunkov888aae22021-01-19 13:32:39 +00001611 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001612 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001613}
1614
Jens Axboef2842ab2020-01-08 11:04:00 -07001615static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1616{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001617 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001618 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001619 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1620 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001621 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001622}
1623
Jens Axboe2c5d7632021-08-21 07:21:19 -06001624/*
1625 * This should only get called when at least one event has been posted.
1626 * Some applications rely on the eventfd notification count only changing
1627 * IFF a new CQE has been added to the CQ ring. There's no dependency on
1628 * a 1:1 relationship between how many times this function is called (and
1629 * hence the eventfd count) and the number of CQEs posted to the CQ ring.
1630 */
Jens Axboeb41e9852020-02-17 09:52:41 -07001631static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001632{
Jens Axboe5fd46172021-08-06 14:04:31 -06001633 /*
1634 * wake_up_all() may seem excessive, but io_wake_function() and
1635 * io_should_wake() handle the termination of the loop and only
1636 * wake as many waiters as we need to.
1637 */
1638 if (wq_has_sleeper(&ctx->cq_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001639 __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
1640 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Jens Axboe534ca6d2020-09-02 13:52:19 -06001641 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1642 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001643 if (io_should_trigger_evfd(ctx))
Jens Axboeccf06b52022-12-23 07:04:49 -07001644 eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
Pavel Begunkov3f008382021-10-01 10:39:33 +01001645 if (waitqueue_active(&ctx->poll_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001646 __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
1647 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Jens Axboe8c838782019-03-12 15:48:16 -06001648}
1649
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001650static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1651{
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001652 /* see waitqueue_active() comment */
1653 smp_mb();
1654
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001655 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkovc57a91fb2021-09-08 20:49:17 +01001656 if (waitqueue_active(&ctx->cq_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001657 __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
1658 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001659 }
1660 if (io_should_trigger_evfd(ctx))
Jens Axboeccf06b52022-12-23 07:04:49 -07001661 eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
Pavel Begunkov3f008382021-10-01 10:39:33 +01001662 if (waitqueue_active(&ctx->poll_wait))
Jens Axboeccf06b52022-12-23 07:04:49 -07001663 __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
1664 poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001665}
1666
Jens Axboec4a2ed72019-11-21 21:01:26 -07001667/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001668static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001669{
Jens Axboeb18032b2021-01-24 16:58:56 -07001670 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001671
Pavel Begunkova566c552021-05-16 22:58:08 +01001672 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001673 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001674
Jens Axboeb18032b2021-01-24 16:58:56 -07001675 posted = false;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001676 spin_lock(&ctx->completion_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001677 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001678 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001679 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001680
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001681 if (!cqe && !force)
1682 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001683 ocqe = list_first_entry(&ctx->cq_overflow_list,
1684 struct io_overflow_cqe, list);
1685 if (cqe)
1686 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1687 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001688 io_account_cq_overflow(ctx);
1689
Jens Axboeb18032b2021-01-24 16:58:56 -07001690 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001691 list_del(&ocqe->list);
1692 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001693 }
1694
Pavel Begunkov09e88402020-12-17 00:24:38 +00001695 all_flushed = list_empty(&ctx->cq_overflow_list);
1696 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001697 clear_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001698 WRITE_ONCE(ctx->rings->sq_flags,
1699 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001700 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001701
Jens Axboeb18032b2021-01-24 16:58:56 -07001702 if (posted)
1703 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001704 spin_unlock(&ctx->completion_lock);
Jens Axboeb18032b2021-01-24 16:58:56 -07001705 if (posted)
1706 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001707 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001708}
1709
Pavel Begunkov90f67362021-08-09 20:18:12 +01001710static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001711{
Jens Axboeca0a2652021-03-04 17:15:48 -07001712 bool ret = true;
1713
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001714 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001715 /* iopoll syncs against uring_lock, not completion_lock */
1716 if (ctx->flags & IORING_SETUP_IOPOLL)
1717 mutex_lock(&ctx->uring_lock);
Pavel Begunkov90f67362021-08-09 20:18:12 +01001718 ret = __io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001719 if (ctx->flags & IORING_SETUP_IOPOLL)
1720 mutex_unlock(&ctx->uring_lock);
1721 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001722
1723 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001724}
1725
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001726/* must be called shortly after putting a request */
1727static inline void io_put_task(struct task_struct *task, int nr)
1728{
1729 struct io_uring_task *tctx = task->io_uring;
1730
Pavel Begunkove98e49b2021-08-18 17:01:43 +01001731 if (likely(task == current)) {
1732 tctx->cached_refs += nr;
1733 } else {
1734 percpu_counter_sub(&tctx->inflight, nr);
1735 if (unlikely(atomic_read(&tctx->in_idle)))
1736 wake_up(&tctx->wait);
1737 put_task_struct_many(task, nr);
1738 }
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001739}
1740
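/*
 * Task references are consumed one per request but taken in batches: top
 * the per-task cache back up to IO_TCTX_REFS_CACHE_NR, covering any
 * deficit already accumulated in cached_refs, so the hot path stays
 * refcount-free.
 */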
Pavel Begunkov9a108672021-08-27 11:55:01 +01001741static void io_task_refs_refill(struct io_uring_task *tctx)
1742{
1743 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
1744
1745 percpu_counter_add(&tctx->inflight, refill);
1746 refcount_add(refill, &current->usage);
1747 tctx->cached_refs += refill;
1748}
1749
1750static inline void io_get_task_refs(int nr)
1751{
1752 struct io_uring_task *tctx = current->io_uring;
1753
1754 tctx->cached_refs -= nr;
1755 if (unlikely(tctx->cached_refs < 0))
1756 io_task_refs_refill(tctx);
1757}
1758
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00001759static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
1760{
1761 struct io_uring_task *tctx = task->io_uring;
1762 unsigned int refs = tctx->cached_refs;
1763
1764 if (refs) {
1765 tctx->cached_refs = 0;
1766 percpu_counter_sub(&tctx->inflight, refs);
1767 put_task_struct_many(task, refs);
1768 }
1769}
1770
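/*
 * The CQ ring is full: stash the completion in a kmalloc'ed overflow entry
 * and raise IORING_SQ_CQ_OVERFLOW so userspace knows it must enter the
 * kernel to flush it. If even that allocation fails, the CQE is lost and
 * only the overflow counter records the fact.
 */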
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001771static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001772 s32 res, u32 cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001773{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001774 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001775
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001776 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1777 if (!ocqe) {
1778 /*
1779 * If we're in ring overflow flush mode, or in task cancel mode,
1780 * or cannot allocate an overflow entry, then we need to drop it
1781 * on the floor.
1782 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001783 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001784 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001785 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001786 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001787 set_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001788 WRITE_ONCE(ctx->rings->sq_flags,
1789 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1790
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001791 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001792 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001793 ocqe->cqe.res = res;
1794 ocqe->cqe.flags = cflags;
1795 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1796 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001797}
1798
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001799static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
1800 s32 res, u32 cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001801{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001802 struct io_uring_cqe *cqe;
1803
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001804 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001805
1806 /*
1807 * If we can't get a cq entry, userspace overflowed the
1808 * submission (by quite a lot). Increment the overflow count in
1809 * the ring.
1810 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001811 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001812 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001813 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001814 WRITE_ONCE(cqe->res, res);
1815 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001816 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001817 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001818 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001819}
1820
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001821static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001822{
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001823 __io_fill_cqe(req->ctx, req->user_data, res, cflags);
1824}
1825
1826static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
1827 s32 res, u32 cflags)
1828{
1829 ctx->cq_extra++;
1830 return __io_fill_cqe(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001831}
1832
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001833static void io_req_complete_post(struct io_kiocb *req, s32 res,
1834 u32 cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001835{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001836 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001837
Jens Axboe79ebeae2021-08-10 15:18:27 -06001838 spin_lock(&ctx->completion_lock);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01001839 __io_fill_cqe(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001840 /*
1841 * If we're the last reference to this request, add to our locked
1842 * free_list cache.
1843 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001844 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001845 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov0756a862021-08-15 10:40:25 +01001846 if (req->flags & IO_DISARM_MASK)
Pavel Begunkov7a612352021-03-09 00:37:59 +00001847 io_disarm_next(req);
1848 if (req->link) {
1849 io_req_task_queue(req->link);
1850 req->link = NULL;
1851 }
1852 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001853 io_dismantle_req(req);
1854 io_put_task(req->task, 1);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001855 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001856 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001857 } else {
1858 if (!percpu_ref_tryget(&ctx->refs))
1859 req = NULL;
1860 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001861 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06001862 spin_unlock(&ctx->completion_lock);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001863
Pavel Begunkov180f8292021-03-14 20:57:09 +00001864 if (req) {
1865 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001866 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001867 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001868}
1869
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001870static inline bool io_req_needs_clean(struct io_kiocb *req)
1871{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001872 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001873}
1874
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001875static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
1876 u32 cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001877{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001878 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001879 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001880 req->result = res;
1881 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001882 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001883}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001884
Pavel Begunkov889fca72021-02-10 00:03:09 +00001885static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001886 s32 res, u32 cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001887{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001888 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1889 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001890 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001891 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001892}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001893
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001894static inline void io_req_complete(struct io_kiocb *req, s32 res)
Jens Axboee1e16092020-06-22 09:17:17 -06001895{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001896 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001897}
1898
Pavel Begunkov5c0ea4c2022-08-29 14:30:12 +01001899static void io_req_complete_failed(struct io_kiocb *req, s32 res)
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001900{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001901 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001902 io_req_complete_post(req, res, 0);
1903}
1904
Pavel Begunkovc6d3d9c2021-08-31 14:13:10 +01001905static void io_req_complete_fail_submit(struct io_kiocb *req)
1906{
1907 /*
1908	 * We don't submit; fail them all. To do that, replace hardlinks with
1909	 * normal links. An extra REQ_F_LINK is tolerated.
1910 */
1911 req->flags &= ~REQ_F_HARDLINK;
1912 req->flags |= REQ_F_LINK;
1913 io_req_complete_failed(req, req->result);
1914}
1915
Pavel Begunkov864ea922021-08-09 13:04:08 +01001916/*
1917 * Don't initialise the fields below on every allocation, but do that in
1918 * advance and keep them valid across allocations.
1919 */
1920static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1921{
1922 req->ctx = ctx;
1923 req->link = NULL;
1924 req->async_data = NULL;
1925 /* not necessary, but safer to zero */
1926 req->result = 0;
1927}
1928
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001929static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001930 struct io_submit_state *state)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001931{
Jens Axboe79ebeae2021-08-10 15:18:27 -06001932 spin_lock(&ctx->completion_lock);
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001933 list_splice_init(&ctx->locked_free_list, &state->free_list);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001934 ctx->locked_free_nr = 0;
Jens Axboe79ebeae2021-08-10 15:18:27 -06001935 spin_unlock(&ctx->completion_lock);
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001936}
1937
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001938/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001939static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001940{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001941 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001942 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001943
Jens Axboec7dae4b2021-02-09 19:53:37 -07001944 /*
1945 * If we have more than a batch's worth of requests in our IRQ side
1946 * locked cache, grab the lock and move them over to our submission
1947 * side cache.
1948 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001949 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001950 io_flush_cached_locked_reqs(ctx, state);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001951
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001952 nr = state->free_reqs;
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01001953 while (!list_empty(&state->free_list)) {
1954 struct io_kiocb *req = list_first_entry(&state->free_list,
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001955 struct io_kiocb, inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001956
Pavel Begunkovbb943b82021-08-09 20:18:10 +01001957 list_del(&req->inflight_entry);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001958 state->reqs[nr++] = req;
1959 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001960 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001961 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001962
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001963 state->free_reqs = nr;
1964 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001965}
1966
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001967/*
1968 * A request might get retired back into the request caches even before opcode
1969 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
1970 * Because of that, io_alloc_req() should be called only under ->uring_lock
1971 * and with extra caution not to get a request that is still being worked on.
1972 */
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001973static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Pavel Begunkov5d5901a2021-08-11 19:28:29 +01001974 __must_hold(&ctx->uring_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001975{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001976 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkov864ea922021-08-09 13:04:08 +01001977 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1978 int ret, i;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001979
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001980 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001981
Pavel Begunkov864ea922021-08-09 13:04:08 +01001982 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1983 goto got_req;
Jens Axboe2579f912019-01-09 09:10:43 -07001984
Pavel Begunkov864ea922021-08-09 13:04:08 +01001985 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1986 state->reqs);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001987
Pavel Begunkov864ea922021-08-09 13:04:08 +01001988 /*
1989 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1990 * retry single alloc to be on the safe side.
1991 */
1992 if (unlikely(ret <= 0)) {
1993 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1994 if (!state->reqs[0])
1995 return NULL;
1996 ret = 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001997 }
Pavel Begunkov864ea922021-08-09 13:04:08 +01001998
1999 for (i = 0; i < ret; i++)
2000 io_preinit_req(state->reqs[i], ctx);
2001 state->free_reqs = ret;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00002002got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03002003 state->free_reqs--;
2004 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07002005}
2006
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002007static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002008{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002009 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002010 fput(file);
2011}
2012
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01002013static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002014{
Pavel Begunkov094bae42021-03-19 17:22:42 +00002015 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03002016
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01002017 if (io_req_needs_clean(req))
2018 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00002019 if (!(flags & REQ_F_FIXED_FILE))
2020 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00002021 if (req->fixed_rsrc_refs)
2022 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01002023 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00002024 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01002025 req->async_data = NULL;
2026 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03002027}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03002028
Pavel Begunkov216578e2020-10-13 09:44:00 +01002029static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03002030{
Jens Axboe51a4cc12020-08-10 10:55:56 -06002031 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002032
Pavel Begunkov216578e2020-10-13 09:44:00 +01002033 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00002034 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03002035
Jens Axboe79ebeae2021-08-10 15:18:27 -06002036 spin_lock(&ctx->completion_lock);
Pavel Begunkovbb943b82021-08-09 20:18:10 +01002037 list_add(&req->inflight_entry, &ctx->locked_free_list);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01002038 ctx->locked_free_nr++;
Jens Axboe79ebeae2021-08-10 15:18:27 -06002039 spin_unlock(&ctx->completion_lock);
Pavel Begunkovc34b0252021-08-09 20:18:08 +01002040
Pavel Begunkovecfc5172020-06-29 13:13:03 +03002041 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06002042}
2043
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002044static inline void io_remove_next_linked(struct io_kiocb *req)
2045{
2046 struct io_kiocb *nxt = req->link;
2047
2048 req->link = nxt->link;
2049 nxt->link = NULL;
2050}
2051
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002052static bool io_kill_linked_timeout(struct io_kiocb *req)
2053 __must_hold(&req->ctx->completion_lock)
Jens Axboe89b263f2021-08-10 15:14:18 -06002054 __must_hold(&req->ctx->timeout_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002055{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002056 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002057
Pavel Begunkovb97e7362021-08-15 10:40:23 +01002058 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002059 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002060
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002061 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00002062 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01002063 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovef9dd632021-08-28 19:54:38 -06002064 list_del(&link->timeout.list);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002065 io_fill_cqe_req(link, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002066 io_put_req_deferred(link);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002067 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01002068 }
2069 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00002070 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002071}
2072
Pavel Begunkovd148ca42020-10-18 10:17:39 +01002073static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002074 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06002075{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002076 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06002077
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002078 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002079 while (link) {
Hao Xua8295b92021-08-27 17:46:09 +08002080 long res = -ECANCELED;
2081
2082 if (link->flags & REQ_F_FAIL)
2083 res = link->result;
2084
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002085 nxt = link->link;
2086 link->link = NULL;
2087
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002088 trace_io_uring_fail_link(req, link);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002089 io_fill_cqe_req(link, res, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01002090 io_put_req_deferred(link);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002091 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06002092 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002093}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03002094
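/*
 * Disarm a completed request's timeout machinery before its links run:
 * cancel an armed or pending linked timeout and, if the request failed,
 * fail the rest of the chain. Returns true if any CQE was posted here.
 */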
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002095static bool io_disarm_next(struct io_kiocb *req)
2096 __must_hold(&req->ctx->completion_lock)
2097{
2098 bool posted = false;
2099
Pavel Begunkov0756a862021-08-15 10:40:25 +01002100 if (req->flags & REQ_F_ARM_LTIMEOUT) {
2101 struct io_kiocb *link = req->link;
2102
Pavel Begunkov906c6ca2021-08-15 10:40:26 +01002103 req->flags &= ~REQ_F_ARM_LTIMEOUT;
Pavel Begunkov0756a862021-08-15 10:40:25 +01002104 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
2105 io_remove_next_linked(req);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01002106 io_fill_cqe_req(link, -ECANCELED, 0);
Pavel Begunkov0756a862021-08-15 10:40:25 +01002107 io_put_req_deferred(link);
2108 posted = true;
2109 }
2110 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
Jens Axboe89b263f2021-08-10 15:14:18 -06002111 struct io_ring_ctx *ctx = req->ctx;
2112
2113 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002114 posted = io_kill_linked_timeout(req);
Jens Axboe89b263f2021-08-10 15:14:18 -06002115 spin_unlock_irq(&ctx->timeout_lock);
2116 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002117 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01002118 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002119 posted |= (req->link != NULL);
2120 io_fail_links(req);
2121 }
2122 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06002123}
2124
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002125static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06002126{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002127 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07002128
Jens Axboe9e645e112019-05-10 16:07:28 -06002129 /*
2130 * If LINK is set, we have dependent requests in this chain. If we
2131 * didn't fail this request, queue the first one up, moving any other
2132 * dependencies to the next request. In case of failure, fail the rest
2133 * of the chain.
2134 */
Pavel Begunkov0756a862021-08-15 10:40:25 +01002135 if (req->flags & IO_DISARM_MASK) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002136 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002137 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002138
Jens Axboe79ebeae2021-08-10 15:18:27 -06002139 spin_lock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002140 posted = io_disarm_next(req);
2141 if (posted)
2142 io_commit_cqring(req->ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06002143 spin_unlock(&ctx->completion_lock);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002144 if (posted)
2145 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002146 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00002147 nxt = req->link;
2148 req->link = NULL;
2149 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07002150}
Jens Axboe2665abf2019-11-05 12:40:47 -07002151
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002152static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002153{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00002154 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03002155 return NULL;
2156 return __io_req_find_next(req);
2157}
2158
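/*
 * Finish a per-ctx task_work batch: if this batch took ->uring_lock, flush
 * the deferred completions and release the lock, then drop the ctx
 * reference taken when the batch switched to this ctx.
 */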
Pavel Begunkovf237c302021-08-18 12:42:46 +01002159static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
Pavel Begunkov2c323952021-02-28 22:04:53 +00002160{
2161 if (!ctx)
2162 return;
Pavel Begunkovf237c302021-08-18 12:42:46 +01002163 if (*locked) {
Hao Xu99c8bc52021-08-21 06:19:54 +08002164 if (ctx->submit_state.compl_nr)
2165 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00002166 mutex_unlock(&ctx->uring_lock);
Pavel Begunkovf237c302021-08-18 12:42:46 +01002167 *locked = false;
Pavel Begunkov2c323952021-02-28 22:04:53 +00002168 }
2169 percpu_ref_put(&ctx->refs);
2170}
2171
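/*
 * Run all task_work queued for this task. Work for the same ctx is
 * batched: ->uring_lock is trylock'ed once per ctx switch so that inline
 * completions can be flushed in bulk between batches.
 */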
Jens Axboe7cbf1722021-02-10 00:03:20 +00002172static void tctx_task_work(struct callback_head *cb)
2173{
Pavel Begunkovf237c302021-08-18 12:42:46 +01002174 bool locked = false;
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002175 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002176 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
2177 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002178
Pavel Begunkov16f72072021-06-17 18:14:09 +01002179 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002180 struct io_wq_work_node *node;
2181
Pavel Begunkov8d4ad412021-09-02 00:38:23 +01002182 if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
2183 io_submit_flush_completions(ctx);
2184
Pavel Begunkov3f184072021-06-17 18:14:06 +01002185 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01002186 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002187 INIT_WQ_LIST(&tctx->task_list);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002188 if (!node)
2189 tctx->task_running = false;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002190 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov6294f362021-08-10 17:53:55 +01002191 if (!node)
2192 break;
Pavel Begunkov3f184072021-06-17 18:14:06 +01002193
Pavel Begunkov6294f362021-08-10 17:53:55 +01002194 do {
Pavel Begunkov3f184072021-06-17 18:14:06 +01002195 struct io_wq_work_node *next = node->next;
2196 struct io_kiocb *req = container_of(node, struct io_kiocb,
2197 io_task_work.node);
2198
2199 if (req->ctx != ctx) {
Pavel Begunkovf237c302021-08-18 12:42:46 +01002200 ctx_flush_and_put(ctx, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002201 ctx = req->ctx;
Pavel Begunkov126180b2021-08-18 12:42:47 +01002202 /* if not contended, grab and improve batching */
2203 locked = mutex_trylock(&ctx->uring_lock);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002204 percpu_ref_get(&ctx->refs);
2205 }
Pavel Begunkovf237c302021-08-18 12:42:46 +01002206 req->io_task_work.func(req, &locked);
Pavel Begunkov3f184072021-06-17 18:14:06 +01002207 node = next;
Pavel Begunkov6294f362021-08-10 17:53:55 +01002208 } while (node);
2209
Jens Axboe7cbf1722021-02-10 00:03:20 +00002210 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01002211 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01002212
Pavel Begunkovf237c302021-08-18 12:42:46 +01002213 ctx_flush_and_put(ctx, &locked);
Pavel Begunkovb168b1a2022-01-09 00:53:22 +00002214
2215 /* relaxed read is enough as only the task itself sets ->in_idle */
2216 if (unlikely(atomic_read(&tctx->in_idle)))
2217 io_uring_drop_tctx_refs(current);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002218}

static void io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_uring_task *tctx = tsk->io_uring;
	enum task_work_notify_mode notify;
	struct io_wq_work_node *node;
	unsigned long flags;
	bool running;

	WARN_ON_ONCE(!tctx);

	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
	running = tctx->task_running;
	if (!running)
		tctx->task_running = true;
	spin_unlock_irqrestore(&tctx->task_lock, flags);

	/* task_work already pending, we're done */
	if (running)
		return;

	/*
	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
	 * processing task_work. There's no reliable way to tell if TWA_RESUME
	 * will do the job.
	 */
	notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
	if (!task_work_add(tsk, &tctx->task_work, notify)) {
		wake_up_process(tsk);
		return;
	}

	spin_lock_irqsave(&tctx->task_lock, flags);
	tctx->task_running = false;
	node = tctx->task_list.first;
	INIT_WQ_LIST(&tctx->task_list);
	spin_unlock_irqrestore(&tctx->task_lock, flags);

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (llist_add(&req->io_task_work.fallback_node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}
}
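
/*
 * A rough sketch of the flow above: requests are linked into the per-task
 * list under task_lock, and only the first add after an idle period pays
 * for task_work_add() plus wake_up_process(). If task_work_add() fails
 * (the task is exiting), everything queued so far is rerouted to the
 * per-ctx fallback workqueue so the callbacks still run eventually.
 */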

static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* not needed for normal modes, but SQPOLL depends on it */
	io_tw_lock(ctx, locked);
	io_req_complete_failed(req, req->result);
}

static void io_req_task_submit(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_tw_lock(ctx, locked);
	/* req->task == current here, checking PF_EXITING is safe */
	if (likely(!(req->task->flags & PF_EXITING)))
		__io_queue_sqe(req);
	else
		io_req_complete_failed(req, -EFAULT);
}

static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	req->result = ret;
	req->io_task_work.func = io_req_task_cancel;
	io_req_task_work_add(req);
}

static void io_req_task_queue(struct io_kiocb *req)
{
	req->io_task_work.func = io_req_task_submit;
	io_req_task_work_add(req);
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_async_work;
	io_req_task_work_add(req);
}

static inline void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_req(struct io_kiocb *req)
{
	io_queue_next(req);
	__io_free_req(req);
}

static void io_free_req_work(struct io_kiocb *req, bool *locked)
{
	io_free_req(req);
}

struct req_batch {
	struct task_struct *task;
	int task_refs;
	int ctx_refs;
};

static inline void io_init_req_batch(struct req_batch *rb)
{
	rb->task_refs = 0;
	rb->ctx_refs = 0;
	rb->task = NULL;
}

static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
				     struct req_batch *rb)
{
	if (rb->ctx_refs)
		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
	if (rb->task)
		io_put_task(rb->task, rb->task_refs);
}

static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
			      struct io_submit_state *state)
{
	io_queue_next(req);
	io_dismantle_req(req);

	if (req->task != rb->task) {
		if (rb->task)
			io_put_task(rb->task, rb->task_refs);
		rb->task = req->task;
		rb->task_refs = 0;
	}
	rb->task_refs++;
	rb->ctx_refs++;

	if (state->free_reqs != ARRAY_SIZE(state->reqs))
		state->reqs[state->free_reqs++] = req;
	else
		list_add(&req->inflight_entry, &state->free_list);
}

static void io_submit_flush_completions(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	int i, nr = state->compl_nr;
	struct req_batch rb;

	spin_lock(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		struct io_kiocb *req = state->compl_reqs[i];

		__io_fill_cqe(ctx, req->user_data, req->result,
			      req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);

	io_init_req_batch(&rb);
	for (i = 0; i < nr; i++) {
		struct io_kiocb *req = state->compl_reqs[i];

		if (req_ref_put_and_test(req))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_req_free_batch_finish(ctx, &rb);
	state->compl_nr = 0;
}
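
/*
 * The two passes above are deliberate: CQEs are filled and committed
 * under completion_lock first so waiters can be notified promptly, and
 * only then are the request and ctx references dropped in a batch,
 * keeping the locked section short.
 */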

/*
 * Drop a reference to the request, and if this was the last reference,
 * return the next request in the chain (if there is one).
 */
static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	if (req_ref_put_and_test(req)) {
		nxt = io_req_find_next(req);
		__io_free_req(req);
	}
	return nxt;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req))
		io_free_req(req);
}

static inline void io_put_req_deferred(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		req->io_task_work.func = io_free_req_work;
		io_req_task_work_add(req);
	}
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
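
/*
 * Example of the unsigned arithmetic above: head and tail are free-running
 * u32 counters, so the subtraction stays correct across wraparound. With
 * tail == 2 and cached_sq_head == 0xfffffffe, (u32)(2 - 0xfffffffe) == 4,
 * i.e. four entries are ready to be consumed.
 */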

static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
{
	unsigned int cflags;

	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(kbuf);
	return cflags;
}
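
/*
 * Example of the cflags encoding above, assuming the uapi values
 * (IORING_CQE_BUFFER_SHIFT == 16, IORING_CQE_F_BUFFER == 1U << 0):
 * buffer id 3 yields cflags == 0x30001, and userspace recovers the id
 * with cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */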

static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;

	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return 0;
	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	return io_put_kbuf(req, kbuf);
}

static inline bool io_run_task_work(void)
{
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_resume(NULL);
	}
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}

	return false;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;

	/* order with ->result store in io_complete_rw_iopoll() */
	smp_rmb();

	io_init_req_batch(&rb);
	while (!list_empty(done)) {
		struct io_uring_cqe *cqe;
		unsigned cflags;

		req = list_first_entry(done, struct io_kiocb, inflight_entry);
		list_del(&req->inflight_entry);
		cflags = io_put_rw_kbuf(req);
		(*nr_events)++;

		cqe = io_get_cqe(ctx);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, cflags);
		} else {
			spin_lock(&ctx->completion_lock);
			io_cqring_event_overflow(ctx, req->user_data,
						 req->result, cflags);
			spin_unlock(&ctx->completion_lock);
		}

		if (req_ref_put_and_test(req))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	io_req_free_batch_finish(ctx, &rb);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_queue && *nr_events < min;

	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
		struct kiocb *kiocb = &req->rw.kiocb;
		int ret;

		/*
		 * Move completed and retryable entries to our local done
		 * list. If we find a request that requires polling, break
		 * out and complete that list first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed)) {
			list_move_tail(&req->inflight_entry, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			spin = false;

		/* iopoll may have completed current req */
		if (READ_ONCE(req->iopoll_completed))
			list_move_tail(&req->inflight_entry, &done);
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return 0;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->iopoll_list)) {
		unsigned int nr_events = 0;

		io_do_iopoll(ctx, &nr_events, 0);

		/* let it sleep and repeat later if it can't complete a request */
		if (nr_events == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. progress by releasing the mutex.
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	/*
	 * Don't enter the poll loop if we already have events pending.
	 * If we do, we can potentially be spinning for commands that
	 * already triggered a CQE (eg in error).
	 */
	if (test_bit(0, &ctx->check_cq_overflow))
		__io_cqring_overflow_flush(ctx, false);
	if (io_cqring_events(ctx))
		goto out;
	do {
		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (list_empty(&ctx->iopoll_list)) {
			u32 tail = ctx->cached_cq_tail;

			mutex_unlock(&ctx->uring_lock);
			io_run_task_work();
			mutex_lock(&ctx->uring_lock);

			/* some requests don't go through iopoll_list */
			if (tail != ctx->cached_cq_tail ||
			    list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, &nr_events, min);
	} while (!ret && nr_events < min && !need_resched());
out:
	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from the submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct super_block *sb = file_inode(req->file)->i_sb;

		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
		sb_end_write(sb);
	}
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;

	if (!rw)
		return !io_req_prep_async(req);
	iov_iter_restore(&rw->iter, &rw->iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If the ref is dying, we might be running poll reap from the exit
	 * work. Don't attempt to reissue from that path, just let it fail
	 * with -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume it's not safe to re-import and reissue if
	 * we're not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = &req->rw;

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		kiocb_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (res != req->result) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE;
			return true;
		}
		req_set_fail(req);
		req->result = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (io && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}
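
/*
 * Worked example for the fixup above: if earlier retries already moved
 * 4096 bytes (io->bytes_done == 4096) and the final attempt returns
 * -EAGAIN, the request completes with 4096 rather than an error; if the
 * final attempt returns 512, it completes with 4608.
 */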

static void io_req_task_complete(struct io_kiocb *req, bool *locked)
{
	unsigned int cflags = io_put_rw_kbuf(req);
	int res = req->result;

	if (*locked) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_submit_state *state = &ctx->submit_state;

		io_req_complete_state(req, res, cflags);
		state->compl_reqs[state->compl_nr++] = req;
		if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
			io_submit_flush_completions(ctx);
	} else {
		io_req_complete_post(req, res, cflags);
	}
}

static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
{
	io_req_io_end(req);
	io_req_task_complete(req, locked);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (__io_complete_rw_common(req, res))
		return;
	req->result = io_fixup_rw_res(req, res);
	req->io_task_work.func = io_req_rw_complete;
	io_req_task_work_add(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (unlikely(res != req->result)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE;
			return;
		}
	}

	WRITE_ONCE(req->result, res);
	/* order with io_iopoll_complete() checking ->result */
	smp_wmb();
	WRITE_ONCE(req->iopoll_completed, 1);
}
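
/*
 * Barrier pairing note: the smp_wmb() above orders the ->result store
 * before the ->iopoll_completed store. It matches the smp_rmb() in
 * io_iopoll_complete(), issued after io_do_iopoll() has observed
 * ->iopoll_completed and before ->result is read, so the reaper never
 * sees a stale result.
 */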

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	const bool in_async = io_wq_current_is_worker();

	/* workqueue context doesn't hold uring_lock, grab it now */
	if (unlikely(in_async))
		mutex_lock(&ctx->uring_lock);

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_queue = false;
	} else if (!ctx->poll_multi_queue) {
		struct io_kiocb *list_req;
		unsigned int queue_num0, queue_num1;

		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
					    inflight_entry);

		if (list_req->file != req->file) {
			ctx->poll_multi_queue = true;
		} else {
			queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
			queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
			if (queue_num0 != queue_num1)
				ctx->poll_multi_queue = true;
		}
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		list_add(&req->inflight_entry, &ctx->iopoll_list);
	else
		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

	if (unlikely(in_async)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are handled either
		 * in SQ thread task context or in io worker task context. If
		 * the current task context is the SQ thread itself, we don't
		 * need to check whether we should wake up the SQ thread.
		 */
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
}

static bool io_bdev_nowait(struct block_device *bdev)
{
	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool __io_file_supports_nowait(struct file *file, int rw)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
			return true;
		return false;
	}
	if (S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
		    file->f_op != &io_uring_fops)
			return true;
		return false;
	}

	/* any ->read/->write should understand O_NONBLOCK */
	if (file->f_flags & O_NONBLOCK)
		return true;

	if (!(file->f_mode & FMODE_NOWAIT))
		return false;

	if (rw == READ)
		return file->f_op->read_iter != NULL;

	return file->f_op->write_iter != NULL;
}
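
/*
 * In short: block devices qualify when the queue advertises nowait
 * support, sockets always qualify, and regular files qualify when the
 * backing device does and the file isn't an io_uring fd. Everything else
 * falls back to O_NONBLOCK/FMODE_NOWAIT plus having the relevant
 * ->read_iter()/->write_iter() method.
 */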

static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
{
	if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
		return true;
	else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
		return true;

	return __io_file_supports_nowait(req->file, rw);
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int rw)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	unsigned ioprio;
	int ret;

	if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
		req->flags |= REQ_F_NOWAIT;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);
	req->imu = NULL;

	if (req->opcode == IORING_OP_READ_FIXED ||
	    req->opcode == IORING_OP_WRITE_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req);
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct kiocb *kiocb = &req->rw.kiocb;

	if (kiocb->ki_pos != -1)
		return &kiocb->ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = req->file->f_pos;
		return &kiocb->ki_pos;
	}

	kiocb->ki_pos = 0;
	return NULL;
}
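
/*
 * Offset semantics resolved above: a position of -1 (sqe->off == -1)
 * means "use the file's current position", mirroring read(2)/write(2);
 * REQ_F_CUR_POS then tells kiocb_done() to write the updated position
 * back to f_pos. Stream files (pipes, sockets) have no position at all,
 * hence the NULL return.
 */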

static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_req_io_end() from here as we're
			 * inline from the submission path.
			 */
			io_req_io_end(req);
			__io_req_complete(req, issue_flags,
					  io_fixup_rw_res(req, ret),
					  io_put_rw_kbuf(req));
		}
	} else {
		io_rw_done(kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req)) {
			io_req_task_queue_reissue(req);
		} else {
			unsigned int cflags = io_put_rw_kbuf(req);
			struct io_ring_ctx *ctx = req->ctx;

			ret = io_fixup_rw_res(req, ret);
			req_set_fail(req);
			if (!(issue_flags & IO_URING_F_NONBLOCK)) {
				mutex_lock(&ctx->uring_lock);
				__io_req_complete(req, issue_flags, ret, cflags);
				mutex_unlock(&ctx->uring_lock);
			} else {
				__io_req_complete(req, issue_flags, ret, cflags);
			}
		}
	}
}

static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
			     struct io_mapped_ubuf *imu)
{
	size_t len = req->rw.len;
	u64 buf_end, buf_addr = req->rw.addr;
	size_t offset;

	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
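
/*
 * Worked example of the segment skip above, assuming 4K pages and a
 * page-aligned first bvec: for offset == 9000, the first bvec (4096 bytes)
 * is skipped leaving offset == 4904, seg_skip == 1 + (4904 >> 12) == 2, so
 * the iterator starts at bvec[2] with iov_offset == 808 (exactly 9000
 * bytes in), and count shrinks by 4096 + 4904 == 9000, back to len.
 */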

static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
{
	if (WARN_ON_ONCE(!req->imu))
		return -EFAULT;
	return __io_import_fixed(req, rw, iter, req->imu);
}

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = xa_load(&req->ctx->io_buffers, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
					       list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			xa_erase(&req->ctx->io_buffers, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}

static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
					bool needs_lock)
{
	struct io_buffer *kbuf;
	u16 bgid;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	bgid = req->buf_index;
	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;
	req->rw.addr = (u64) (unsigned long) kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return u64_to_user_ptr(kbuf->addr);
}

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				bool needs_lock)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	ssize_t len;

	uiov = u64_to_user_ptr(req->rw.addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      bool needs_lock)
{
	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    bool needs_lock)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		struct io_buffer *kbuf;

		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov[0].iov_len = kbuf->len;
		return 0;
	}
	if (req->rw.len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, needs_lock);
#endif

	return __io_iov_buffer_select(req, iov, needs_lock);
}

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	u8 opcode = req->opcode;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (req->flags & REQ_F_BUFFER_SELECT) {
			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
			if (IS_ERR(buf))
				return PTR_ERR(buf);
			req->rw.len = sqe_len;
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, *iovec, needs_lock);
		if (!ret)
			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
			      req->ctx->compat);
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
{
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
			iovec.iov_len = req->rw.len;
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, ppos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, ppos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			req->rw.addr += nr;
			req->rw.len -= nr;
			if (!req->rw.len)
				break;
		}
		if (nr != iovec.iov_len)
			break;
	}

	return ret;
}
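
/*
 * Behavior note for the loop above: progress is accumulated in ret, and a
 * short ->read()/->write() ends the loop. E.g. if an 8 KiB segment gets
 * only 4 KiB on the first call, the request completes with 4096 rather
 * than retrying; an error after some progress likewise reports the bytes
 * moved so far instead of the error.
 */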

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *rw = req->async_data;

	memcpy(&rw->iter, iter, sizeof(*iter));
	rw->free_iovec = iovec;
	rw->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		rw->iter.iov = rw->fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			rw->iter.iov += iov_off;
		}
		if (rw->fast_iov != fast_iov)
			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static inline int io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
	return req->async_data == NULL;
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force)
{
	if (!force && !io_op_defs[req->opcode].needs_async_setup)
		return 0;
	if (!req->async_data) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, fast_iov, iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->iter, &iorw->iter_state);
	}
	return 0;
}
3459
Pavel Begunkov73debe62020-09-30 22:57:54 +03003460static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003461{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003462 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003463 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003464 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003465
Pavel Begunkov2846c482020-11-07 13:16:27 +00003466 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003467 if (unlikely(ret < 0))
3468 return ret;
3469
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003470 iorw->bytes_done = 0;
3471 iorw->free_iovec = iov;
3472 if (iov)
3473 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboecd658692021-09-10 11:19:14 -06003474 iov_iter_save_state(&iorw->iter, &iorw->iter_state);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003475 return 0;
3476}
3477
Pavel Begunkov73debe62020-09-30 22:57:54 +03003478static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003479{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003480 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3481 return -EBADF;
Jens Axboe5d329e12021-09-14 11:08:37 -06003482 return io_prep_rw(req, sqe, READ);
Jens Axboef67676d2019-12-02 11:03:47 -07003483}
3484
Jens Axboec1dd91d2020-08-03 16:43:59 -06003485/*
3486 * This is our waitqueue callback handler, registered through lock_page_async()
3487 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3488 * This gets called when the page is unlocked, and we generally expect that to
3489 * happen when the page IO is completed and the page is now uptodate. This will
3490 * queue a task_work based retry of the operation, attempting to copy the data
3491 * again. If the latter fails because the page was NOT uptodate, then we will
3492 * do a thread based blocking retry of the operation. That's the unexpected
3493 * slow path.
3494 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003495static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3496 int sync, void *arg)
3497{
3498 struct wait_page_queue *wpq;
3499 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003500 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003501
3502 wpq = container_of(wait, struct wait_page_queue, wait);
3503
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003504 if (!wake_page_match(wpq, key))
3505 return 0;
3506
Hao Xuc8d317a2020-09-29 20:00:45 +08003507 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003508 list_del_init(&wait->entry);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003509 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003510 return 1;
3511}
3512
Jens Axboec1dd91d2020-08-03 16:43:59 -06003513/*
3514 * This controls whether a given IO request should be armed for async page
3515 * based retry. If we return false here, the request is handed to the async
3516 * worker threads for retry. If we're doing buffered reads on a regular file,
3517 * we prepare a private wait_page_queue entry and retry the operation. This
3518 * will either succeed because the page is now uptodate and unlocked, or it
3519 * will register a callback when the page is unlocked at IO completion. Through
3520 * that callback, io_uring uses task_work to setup a retry of the operation.
3521 * That retry will attempt the buffered read again. The retry will generally
3522 * succeed, or in rare cases where it fails, we then fall back to using the
3523 * async worker threads for a blocking retry.
3524 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003525static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003526{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003527 struct io_async_rw *rw = req->async_data;
3528 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003529 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003530
3531 /* never retry for NOWAIT, we just complete with -EAGAIN */
3532 if (req->flags & REQ_F_NOWAIT)
3533 return false;
3534
Jens Axboe227c0c92020-08-13 11:51:40 -06003535 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003536 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003537 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003538
Jens Axboebcf5a062020-05-22 09:24:42 -06003539 /*
3540 * just use poll if we can, and don't attempt if the fs doesn't
3541	 * support callback-based unlocks
3542 */
3543 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3544 return false;
3545
Jens Axboe3b2a4432020-08-16 10:58:43 -07003546 wait->wait.func = io_async_buf_func;
3547 wait->wait.private = req;
3548 wait->wait.flags = 0;
3549 INIT_LIST_HEAD(&wait->wait.entry);
3550 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003551 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003552 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003553 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003554}
3555
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003556static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003557{
3558 if (req->file->f_op->read_iter)
3559 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003560 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003561 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003562 else
3563 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003564}
3565
Ming Lei7db30432021-08-21 23:07:51 +08003566static bool need_read_all(struct io_kiocb *req)
3567{
3568 return req->flags & REQ_F_ISREG ||
3569 S_ISBLK(file_inode(req->file)->i_mode);
3570}
3571
Pavel Begunkov889fca72021-02-10 00:03:09 +00003572static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003573{
3574 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003575 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003576 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003577 struct io_async_rw *rw = req->async_data;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003578 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboecd658692021-09-10 11:19:14 -06003579 struct iov_iter_state __state, *state;
3580 ssize_t ret, ret2;
Dylan Yudaken05d69b32022-02-22 02:55:03 -08003581 loff_t *ppos;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003582
Pavel Begunkov2846c482020-11-07 13:16:27 +00003583 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003584 iter = &rw->iter;
Jens Axboecd658692021-09-10 11:19:14 -06003585 state = &rw->iter_state;
3586 /*
3587		 * We come here from an earlier attempt; restore our state to
3588		 * match in case it no longer does. It's cheap enough that we don't
3589 * need to make this conditional.
3590 */
3591 iov_iter_restore(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003592 iovec = NULL;
3593 } else {
3594 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3595 if (ret < 0)
3596 return ret;
Jens Axboecd658692021-09-10 11:19:14 -06003597 state = &__state;
3598 iov_iter_save_state(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003599 }
Jens Axboecd658692021-09-10 11:19:14 -06003600 req->result = iov_iter_count(iter);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003601
Jens Axboefd6c2e42019-12-18 12:19:41 -07003602 /* Ensure we clear previously set non-block flag */
3603 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003604 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003605 else
3606 kiocb->ki_flags |= IOCB_NOWAIT;
3607
Pavel Begunkov24c74672020-06-21 13:09:51 +03003608	/* If the file doesn't support async, just punt to async */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003609 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003610 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003611 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003612 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003613
Dylan Yudaken05d69b32022-02-22 02:55:03 -08003614 ppos = io_kiocb_update_pos(req);
Dylan Yudakenff8a0702022-02-22 02:55:02 -08003615
Dylan Yudaken05d69b32022-02-22 02:55:03 -08003616 ret = rw_verify_area(READ, req->file, ppos, req->result);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003617 if (unlikely(ret)) {
3618 kfree(iovec);
3619 return ret;
3620 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003621
Jens Axboe227c0c92020-08-13 11:51:40 -06003622 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003623
Jens Axboe230d50d2021-04-01 20:41:15 -06003624 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003625 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003626 /* IOPOLL retry should happen for io-wq threads */
3627 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003628 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003629		/* no retry on NONBLOCK or RWF_NOWAIT */
3630 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003631 goto done;
Jens Axboef38c7e32020-09-25 15:23:43 -06003632 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003633 } else if (ret == -EIOCBQUEUED) {
3634 goto out_free;
Jens Axboecd658692021-09-10 11:19:14 -06003635 } else if (ret <= 0 || ret == req->result || !force_nonblock ||
Ming Lei7db30432021-08-21 23:07:51 +08003636 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003637		/* read it all, failed, already did a sync attempt, or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003638 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003639 }
3640
Jens Axboecd658692021-09-10 11:19:14 -06003641 /*
3642 * Don't depend on the iter state matching what was consumed, or being
3643 * untouched in case of error. Restore it and we'll advance it
3644 * manually if we need to.
3645 */
3646 iov_iter_restore(iter, state);
3647
Jens Axboe227c0c92020-08-13 11:51:40 -06003648 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003649 if (ret2)
3650 return ret2;
3651
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003652 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003653 rw = req->async_data;
Jens Axboecd658692021-09-10 11:19:14 -06003654 /*
3655 * Now use our persistent iterator and state, if we aren't already.
3656 * We've restored and mapped the iter to match.
3657 */
3658 if (iter != &rw->iter) {
3659 iter = &rw->iter;
3660 state = &rw->iter_state;
3661 }
Jens Axboe227c0c92020-08-13 11:51:40 -06003662
Pavel Begunkovb23df912021-02-04 13:52:04 +00003663 do {
Jens Axboecd658692021-09-10 11:19:14 -06003664 /*
3665 * We end up here because of a partial read, either from
3666 * above or inside this loop. Advance the iter by the bytes
3667 * that were consumed.
3668 */
3669 iov_iter_advance(iter, ret);
3670 if (!iov_iter_count(iter))
3671 break;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003672 rw->bytes_done += ret;
Jens Axboecd658692021-09-10 11:19:14 -06003673 iov_iter_save_state(iter, state);
3674
Pavel Begunkovb23df912021-02-04 13:52:04 +00003675 /* if we can retry, do so with the callbacks armed */
3676 if (!io_rw_should_retry(req)) {
3677 kiocb->ki_flags &= ~IOCB_WAITQ;
3678 return -EAGAIN;
3679 }
3680
Pavel Begunkov98aada62022-10-16 22:42:58 +01003681 req->result = iov_iter_count(iter);
Pavel Begunkovb23df912021-02-04 13:52:04 +00003682 /*
3683		 * Now retry the read with the IOCB_WAITQ parts set in the iocb. If
3684 * we get -EIOCBQUEUED, then we'll get a notification when the
3685 * desired page gets unlocked. We can also get a partial read
3686 * here, and if we do, then just retry at the new offset.
3687 */
3688 ret = io_iter_do_read(req, iter);
3689 if (ret == -EIOCBQUEUED)
3690 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003691 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003692 kiocb->ki_flags &= ~IOCB_WAITQ;
Jens Axboecd658692021-09-10 11:19:14 -06003693 iov_iter_restore(iter, state);
3694 } while (ret > 0);
Jens Axboe227c0c92020-08-13 11:51:40 -06003695done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003696 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003697out_free:
3698	/* it's faster to check here than to delegate to kfree */
3699 if (iovec)
3700 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003701 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003702}
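
/*
 * Minimal userspace sketch of the read path this function services. This
 * assumes liburing (2.1 or newer) is available; the io_uring_* names below
 * are liburing helpers, not symbols in this file, and error handling is
 * elided:
 *
 *	#include <liburing.h>
 *	#include <fcntl.h>
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	int fd = open("data.bin", O_RDONLY);
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 *
 * cqe->res carries the byte count or -errno. Partial buffered reads on
 * regular files and block devices may be retried internally (see
 * need_read_all() and the retry loop above) before the completion posts.
 */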
3703
Pavel Begunkov73debe62020-09-30 22:57:54 +03003704static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003705{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003706 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3707 return -EBADF;
Jens Axboe5d329e12021-09-14 11:08:37 -06003708 return io_prep_rw(req, sqe, WRITE);
Jens Axboef67676d2019-12-02 11:03:47 -07003709}
3710
Pavel Begunkov889fca72021-02-10 00:03:09 +00003711static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003712{
3713 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003714 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003715 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003716 struct io_async_rw *rw = req->async_data;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003717 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboecd658692021-09-10 11:19:14 -06003718 struct iov_iter_state __state, *state;
3719 ssize_t ret, ret2;
Dylan Yudaken05d69b32022-02-22 02:55:03 -08003720 loff_t *ppos;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003721
Pavel Begunkov2846c482020-11-07 13:16:27 +00003722 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003723 iter = &rw->iter;
Jens Axboecd658692021-09-10 11:19:14 -06003724 state = &rw->iter_state;
3725 iov_iter_restore(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003726 iovec = NULL;
3727 } else {
3728 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3729 if (ret < 0)
3730 return ret;
Jens Axboecd658692021-09-10 11:19:14 -06003731 state = &__state;
3732 iov_iter_save_state(iter, state);
Pavel Begunkov2846c482020-11-07 13:16:27 +00003733 }
Jens Axboecd658692021-09-10 11:19:14 -06003734 req->result = iov_iter_count(iter);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003735
Jens Axboefd6c2e42019-12-18 12:19:41 -07003736 /* Ensure we clear previously set non-block flag */
3737 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003738 kiocb->ki_flags &= ~IOCB_NOWAIT;
3739 else
3740 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003741
Pavel Begunkov24c74672020-06-21 13:09:51 +03003742	/* If the file doesn't support async, just punt to async */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003743 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003744 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003745
Jens Axboe10d59342019-12-09 20:16:22 -07003746	/* file path doesn't support NOWAIT for non-direct IO */
3747 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3748 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003749 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003750
Dylan Yudaken05d69b32022-02-22 02:55:03 -08003751 ppos = io_kiocb_update_pos(req);
Dylan Yudakenff8a0702022-02-22 02:55:02 -08003752
Dylan Yudaken05d69b32022-02-22 02:55:03 -08003753 ret = rw_verify_area(WRITE, req->file, ppos, req->result);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003754 if (unlikely(ret))
3755 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003756
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003757 /*
3758 * Open-code file_start_write here to grab freeze protection,
3759 * which will be released by another thread in
3760 * io_complete_rw(). Fool lockdep by telling it the lock got
3761 * released so that it doesn't complain about the held lock when
3762 * we return to userspace.
3763 */
3764 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003765 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003766 __sb_writers_release(file_inode(req->file)->i_sb,
3767 SB_FREEZE_WRITE);
3768 }
3769 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003770
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003771 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003772 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003773 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003774 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003775 else
3776 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003777
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003778 if (req->flags & REQ_F_REISSUE) {
3779 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003780 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003781 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003782
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003783 /*
3784 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3785 * retry them without IOCB_NOWAIT.
3786 */
3787 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3788 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003789	/* no retry on NONBLOCK or RWF_NOWAIT */
3790 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003791 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003792 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003793 /* IOPOLL retry should happen for io-wq threads */
3794 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3795 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003796done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003797 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003798 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003799copy_iov:
Jens Axboecd658692021-09-10 11:19:14 -06003800 iov_iter_restore(iter, state);
Jens Axboe227c0c92020-08-13 11:51:40 -06003801 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Jens Axboe295219a2022-08-25 10:19:08 -06003802 if (!ret) {
3803 if (kiocb->ki_flags & IOCB_WRITE)
3804 kiocb_end_write(req);
3805 return -EAGAIN;
3806 }
3807 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003808 }
Jens Axboe31b51512019-01-18 22:56:34 -07003809out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003810 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003811 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003812 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003813 return ret;
3814}
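
/*
 * The write side looks identical from userspace; only the prep helper
 * changes (a sketch, with the same liburing assumptions and setup/reap
 * boilerplate as the readv sketch after io_read() above):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_writev(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 *
 * A buffered write that would block takes the copy_iov path above and is
 * finished from io-wq; the application just sees the final cqe->res.
 */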
3815
Jens Axboe80a261f2020-09-28 14:23:58 -06003816static int io_renameat_prep(struct io_kiocb *req,
3817 const struct io_uring_sqe *sqe)
3818{
3819 struct io_rename *ren = &req->rename;
3820 const char __user *oldf, *newf;
3821
Jens Axboeed7eb252021-06-23 09:04:13 -06003822 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3823 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003824 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeed7eb252021-06-23 09:04:13 -06003825 return -EINVAL;
Jens Axboe80a261f2020-09-28 14:23:58 -06003826 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3827 return -EBADF;
3828
3829 ren->old_dfd = READ_ONCE(sqe->fd);
3830 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3831 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3832 ren->new_dfd = READ_ONCE(sqe->len);
3833 ren->flags = READ_ONCE(sqe->rename_flags);
3834
3835 ren->oldpath = getname(oldf);
3836 if (IS_ERR(ren->oldpath))
3837 return PTR_ERR(ren->oldpath);
3838
3839 ren->newpath = getname(newf);
3840 if (IS_ERR(ren->newpath)) {
3841 putname(ren->oldpath);
3842 return PTR_ERR(ren->newpath);
3843 }
3844
3845 req->flags |= REQ_F_NEED_CLEANUP;
3846 return 0;
3847}
3848
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003849static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003850{
3851 struct io_rename *ren = &req->rename;
3852 int ret;
3853
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003854 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003855 return -EAGAIN;
3856
3857 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3858 ren->newpath, ren->flags);
3859
3860 req->flags &= ~REQ_F_NEED_CLEANUP;
3861 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003862 req_set_fail(req);
Jens Axboe80a261f2020-09-28 14:23:58 -06003863 io_req_complete(req, ret);
3864 return 0;
3865}
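
/*
 * Userspace sketch for IORING_OP_RENAMEAT (same liburing assumptions and
 * boilerplate as the readv sketch after io_read() above):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt", AT_FDCWD, "new.txt", 0);
 *	io_uring_submit(&ring);
 *
 * cqe->res is 0 on success or -errno, exactly what do_renameat2() returns;
 * the -EAGAIN on nonblocking issue above means this op always runs from
 * io-wq.
 */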
3866
Jens Axboe14a11432020-09-28 14:27:37 -06003867static int io_unlinkat_prep(struct io_kiocb *req,
3868 const struct io_uring_sqe *sqe)
3869{
3870 struct io_unlink *un = &req->unlink;
3871 const char __user *fname;
3872
Jens Axboe22634bc2021-06-23 09:07:45 -06003873 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3874 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01003875 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
3876 sqe->splice_fd_in)
Jens Axboe22634bc2021-06-23 09:07:45 -06003877 return -EINVAL;
Jens Axboe14a11432020-09-28 14:27:37 -06003878 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3879 return -EBADF;
3880
3881 un->dfd = READ_ONCE(sqe->fd);
3882
3883 un->flags = READ_ONCE(sqe->unlink_flags);
3884 if (un->flags & ~AT_REMOVEDIR)
3885 return -EINVAL;
3886
3887 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3888 un->filename = getname(fname);
3889 if (IS_ERR(un->filename))
3890 return PTR_ERR(un->filename);
3891
3892 req->flags |= REQ_F_NEED_CLEANUP;
3893 return 0;
3894}
3895
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003896static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003897{
3898 struct io_unlink *un = &req->unlink;
3899 int ret;
3900
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003901 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003902 return -EAGAIN;
3903
3904 if (un->flags & AT_REMOVEDIR)
3905 ret = do_rmdir(un->dfd, un->filename);
3906 else
3907 ret = do_unlinkat(un->dfd, un->filename);
3908
3909 req->flags &= ~REQ_F_NEED_CLEANUP;
3910 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003911 req_set_fail(req);
Jens Axboe14a11432020-09-28 14:27:37 -06003912 io_req_complete(req, ret);
3913 return 0;
3914}
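
/*
 * Userspace sketch for IORING_OP_UNLINKAT (same liburing assumptions):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_unlinkat(sqe, AT_FDCWD, "stale.tmp", 0);
 *	io_uring_submit(&ring);
 *
 * Passing AT_REMOVEDIR in the flags selects the do_rmdir() branch above;
 * any other flag bit is rejected at prep time with -EINVAL.
 */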
3915
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07003916static int io_mkdirat_prep(struct io_kiocb *req,
3917 const struct io_uring_sqe *sqe)
3918{
3919 struct io_mkdir *mkd = &req->mkdir;
3920 const char __user *fname;
3921
3922 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3923 return -EINVAL;
3924 if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
3925 sqe->splice_fd_in)
3926 return -EINVAL;
3927 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3928 return -EBADF;
3929
3930 mkd->dfd = READ_ONCE(sqe->fd);
3931 mkd->mode = READ_ONCE(sqe->len);
3932
3933 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3934 mkd->filename = getname(fname);
3935 if (IS_ERR(mkd->filename))
3936 return PTR_ERR(mkd->filename);
3937
3938 req->flags |= REQ_F_NEED_CLEANUP;
3939 return 0;
3940}
3941
3942static int io_mkdirat(struct io_kiocb *req, int issue_flags)
3943{
3944 struct io_mkdir *mkd = &req->mkdir;
3945 int ret;
3946
3947 if (issue_flags & IO_URING_F_NONBLOCK)
3948 return -EAGAIN;
3949
3950 ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
3951
3952 req->flags &= ~REQ_F_NEED_CLEANUP;
3953 if (ret < 0)
3954 req_set_fail(req);
3955 io_req_complete(req, ret);
3956 return 0;
3957}
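
/*
 * Userspace sketch for IORING_OP_MKDIRAT (same liburing assumptions):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_mkdirat(sqe, AT_FDCWD, "newdir", 0755);
 *	io_uring_submit(&ring);
 *
 * The mode travels in sqe->len (see io_mkdirat_prep() above).
 */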
3958
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07003959static int io_symlinkat_prep(struct io_kiocb *req,
3960 const struct io_uring_sqe *sqe)
3961{
3962 struct io_symlink *sl = &req->symlink;
3963 const char __user *oldpath, *newpath;
3964
3965 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3966 return -EINVAL;
3967 if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
3968 sqe->splice_fd_in)
3969 return -EINVAL;
3970 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3971 return -EBADF;
3972
3973 sl->new_dfd = READ_ONCE(sqe->fd);
3974 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
3975 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3976
3977 sl->oldpath = getname(oldpath);
3978 if (IS_ERR(sl->oldpath))
3979 return PTR_ERR(sl->oldpath);
3980
3981 sl->newpath = getname(newpath);
3982 if (IS_ERR(sl->newpath)) {
3983 putname(sl->oldpath);
3984 return PTR_ERR(sl->newpath);
3985 }
3986
3987 req->flags |= REQ_F_NEED_CLEANUP;
3988 return 0;
3989}
3990
3991static int io_symlinkat(struct io_kiocb *req, int issue_flags)
3992{
3993 struct io_symlink *sl = &req->symlink;
3994 int ret;
3995
3996 if (issue_flags & IO_URING_F_NONBLOCK)
3997 return -EAGAIN;
3998
3999 ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
4000
4001 req->flags &= ~REQ_F_NEED_CLEANUP;
4002 if (ret < 0)
4003 req_set_fail(req);
4004 io_req_complete(req, ret);
4005 return 0;
4006}
4007
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07004008static int io_linkat_prep(struct io_kiocb *req,
4009 const struct io_uring_sqe *sqe)
4010{
4011 struct io_hardlink *lnk = &req->hardlink;
4012 const char __user *oldf, *newf;
4013
4014 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4015 return -EINVAL;
4016 if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
4017 return -EINVAL;
4018 if (unlikely(req->flags & REQ_F_FIXED_FILE))
4019 return -EBADF;
4020
4021 lnk->old_dfd = READ_ONCE(sqe->fd);
4022 lnk->new_dfd = READ_ONCE(sqe->len);
4023 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
4024 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4025 lnk->flags = READ_ONCE(sqe->hardlink_flags);
4026
4027 lnk->oldpath = getname(oldf);
4028 if (IS_ERR(lnk->oldpath))
4029 return PTR_ERR(lnk->oldpath);
4030
4031 lnk->newpath = getname(newf);
4032 if (IS_ERR(lnk->newpath)) {
4033 putname(lnk->oldpath);
4034 return PTR_ERR(lnk->newpath);
4035 }
4036
4037 req->flags |= REQ_F_NEED_CLEANUP;
4038 return 0;
4039}
4040
4041static int io_linkat(struct io_kiocb *req, int issue_flags)
4042{
4043 struct io_hardlink *lnk = &req->hardlink;
4044 int ret;
4045
4046 if (issue_flags & IO_URING_F_NONBLOCK)
4047 return -EAGAIN;
4048
4049 ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
4050 lnk->newpath, lnk->flags);
4051
4052 req->flags &= ~REQ_F_NEED_CLEANUP;
4053 if (ret < 0)
4054 req_set_fail(req);
4055 io_req_complete(req, ret);
4056 return 0;
4057}
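
/*
 * Userspace sketches for IORING_OP_SYMLINKAT and IORING_OP_LINKAT (same
 * liburing assumptions as above):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_symlinkat(sqe, "target", AT_FDCWD, "link");
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_linkat(sqe, AT_FDCWD, "a.txt", AT_FDCWD, "b.txt", 0);
 *
 * Both ops take getname() copies of the paths at prep time, so the strings
 * only need to stay valid until submission returns.
 */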
4058
Jens Axboe36f4fa62020-09-05 11:14:22 -06004059static int io_shutdown_prep(struct io_kiocb *req,
4060 const struct io_uring_sqe *sqe)
4061{
4062#if defined(CONFIG_NET)
4063 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4064 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004065 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
4066 sqe->buf_index || sqe->splice_fd_in))
Jens Axboe36f4fa62020-09-05 11:14:22 -06004067 return -EINVAL;
4068
4069 req->shutdown.how = READ_ONCE(sqe->len);
4070 return 0;
4071#else
4072 return -EOPNOTSUPP;
4073#endif
4074}
4075
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004076static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06004077{
4078#if defined(CONFIG_NET)
4079 struct socket *sock;
4080 int ret;
4081
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004082 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06004083 return -EAGAIN;
4084
Linus Torvalds48aba792020-12-16 12:44:05 -08004085 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06004086 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08004087 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06004088
4089 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07004090 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004091 req_set_fail(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06004092 io_req_complete(req, ret);
4093 return 0;
4094#else
4095 return -EOPNOTSUPP;
4096#endif
4097}
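
/*
 * Userspace sketch for IORING_OP_SHUTDOWN (same liburing assumptions;
 * sockfd is a placeholder):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 *
 * Non-socket files complete with -ENOTSOCK, matching the sock_from_file()
 * check above.
 */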
4098
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004099static int __io_splice_prep(struct io_kiocb *req,
4100 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004101{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01004102 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004103 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004104
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004105 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4106 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004107
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004108 sp->len = READ_ONCE(sqe->len);
4109 sp->flags = READ_ONCE(sqe->splice_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004110 if (unlikely(sp->flags & ~valid_flags))
4111 return -EINVAL;
Jens Axboeae6cba32022-03-29 10:59:20 -06004112 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004113 return 0;
4114}
4115
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004116static int io_tee_prep(struct io_kiocb *req,
4117 const struct io_uring_sqe *sqe)
4118{
4119 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
4120 return -EINVAL;
4121 return __io_splice_prep(req, sqe);
4122}
4123
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004124static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004125{
4126 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004127 struct file *out = sp->file_out;
4128 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
Jens Axboeae6cba32022-03-29 10:59:20 -06004129 struct file *in;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004130 long ret = 0;
4131
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004132 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004133 return -EAGAIN;
Jens Axboeae6cba32022-03-29 10:59:20 -06004134
4135 in = io_file_get(req->ctx, req, sp->splice_fd_in,
Bing-Jhong Billy Jhengcf7f9cd2023-03-02 21:00:06 +08004136 (sp->flags & SPLICE_F_FD_IN_FIXED), issue_flags);
Jens Axboeae6cba32022-03-29 10:59:20 -06004137 if (!in) {
4138 ret = -EBADF;
4139 goto done;
4140 }
4141
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004142 if (sp->len)
4143 ret = do_tee(in, out, sp->len, flags);
4144
Pavel Begunkove1d767f2021-03-19 17:22:43 +00004145 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4146 io_put_file(in);
Jens Axboeae6cba32022-03-29 10:59:20 -06004147done:
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004148 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004149 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004150 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004151 return 0;
4152}
4153
4154static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4155{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01004156 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004157
4158 sp->off_in = READ_ONCE(sqe->splice_off_in);
4159 sp->off_out = READ_ONCE(sqe->off);
4160 return __io_splice_prep(req, sqe);
4161}
4162
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004163static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004164{
4165 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004166 struct file *out = sp->file_out;
4167 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
4168 loff_t *poff_in, *poff_out;
Jens Axboeae6cba32022-03-29 10:59:20 -06004169 struct file *in;
Pavel Begunkovc9687422020-05-04 23:00:54 +03004170 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004171
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004172 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03004173 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004174
Jens Axboeae6cba32022-03-29 10:59:20 -06004175 in = io_file_get(req->ctx, req, sp->splice_fd_in,
Bing-Jhong Billy Jhengcf7f9cd2023-03-02 21:00:06 +08004176 (sp->flags & SPLICE_F_FD_IN_FIXED), issue_flags);
Jens Axboeae6cba32022-03-29 10:59:20 -06004177 if (!in) {
4178 ret = -EBADF;
4179 goto done;
4180 }
4181
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004182 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
4183 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03004184
Jens Axboe948a7742020-05-17 14:21:38 -06004185 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03004186 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004187
Pavel Begunkove1d767f2021-03-19 17:22:43 +00004188 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
4189 io_put_file(in);
Jens Axboeae6cba32022-03-29 10:59:20 -06004190done:
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004191 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004192 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004193 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004194 return 0;
4195}
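
/*
 * Userspace sketches for IORING_OP_SPLICE and IORING_OP_TEE (same liburing
 * assumptions; pipefd/outfd/out_pipefd are placeholders):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_splice(sqe, pipefd, -1, outfd, -1, 4096, 0);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_tee(sqe, pipefd, out_pipefd, 4096, 0);
 *
 * An offset of -1 maps to a NULL loff_t pointer above, i.e. "use the
 * file's own position"; SPLICE_F_FD_IN_FIXED makes the fd_in argument an
 * index into the registered (fixed) file table.
 */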
4196
Jens Axboe2b188cc2019-01-07 10:46:33 -07004197/*
4198 * IORING_OP_NOP just posts a completion event, nothing else.
4199 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00004200static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004201{
4202 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004203
Jens Axboedef596e2019-01-09 08:59:42 -07004204 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4205 return -EINVAL;
4206
Pavel Begunkov889fca72021-02-10 00:03:09 +00004207 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004208 return 0;
4209}
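
/*
 * IORING_OP_NOP makes a handy smoke test for ring setup (a sketch, same
 * liburing assumptions as above):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *
 * The completion arrives with cqe->res == 0.
 */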
4210
Pavel Begunkov1155c762021-02-18 18:29:38 +00004211static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004212{
Jens Axboe6b063142019-01-10 22:13:58 -07004213 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004214
Jens Axboe6b063142019-01-10 22:13:58 -07004215 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07004216 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004217 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4218 sqe->splice_fd_in))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004219 return -EINVAL;
4220
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004221 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4222 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
4223 return -EINVAL;
4224
4225 req->sync.off = READ_ONCE(sqe->off);
4226 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004227 return 0;
4228}
4229
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004230static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07004231{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004232 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004233 int ret;
4234
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004235 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004236 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004237 return -EAGAIN;
4238
Jens Axboe9adbd452019-12-20 08:45:55 -07004239 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004240 end > 0 ? end : LLONG_MAX,
4241 req->sync.flags & IORING_FSYNC_DATASYNC);
4242 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004243 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004244 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004245 return 0;
4246}
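
/*
 * Userspace sketch for IORING_OP_FSYNC (same liburing assumptions):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 *
 * The helper leaves sqe->off/sqe->len at zero, which io_fsync() above
 * turns into a whole-file range (end <= 0 becomes LLONG_MAX); setting
 * them bounds the vfs_fsync_range() call.
 */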
4247
Jens Axboed63d1b52019-12-10 10:38:56 -07004248static int io_fallocate_prep(struct io_kiocb *req,
4249 const struct io_uring_sqe *sqe)
4250{
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004251 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4252 sqe->splice_fd_in)
Jens Axboed63d1b52019-12-10 10:38:56 -07004253 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004254 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4255 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07004256
4257 req->sync.off = READ_ONCE(sqe->off);
4258 req->sync.len = READ_ONCE(sqe->addr);
4259 req->sync.mode = READ_ONCE(sqe->len);
4260 return 0;
4261}
4262
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004263static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07004264{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004265 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07004266
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004267	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004268 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004269 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004270 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4271 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004272 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004273 req_set_fail(req);
Jens Axboedf1ec532022-03-20 13:08:38 -06004274 else
4275 fsnotify_modify(req->file);
Jens Axboee1e16092020-06-22 09:17:17 -06004276 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07004277 return 0;
4278}
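
/*
 * Userspace sketch for IORING_OP_FALLOCATE (same liburing assumptions):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1 << 20);
 *	io_uring_submit(&ring);
 *
 * Mode 0 is a plain allocation; since the op always needs a blocking
 * context, it is issued from io-wq regardless of ring flags.
 */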
4279
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004280static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004281{
Jens Axboef8748882020-01-08 17:47:02 -07004282 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004283 int ret;
4284
Pavel Begunkovd3fddf62021-08-09 13:04:16 +01004285 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4286 return -EINVAL;
Pavel Begunkovb9445592021-08-25 12:25:45 +01004287 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07004288 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004289 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07004290 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004291
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004292	/* open.how should already be initialised */
4293 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06004294 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004295
Pavel Begunkov25e72d12020-06-03 18:03:23 +03004296 req->open.dfd = READ_ONCE(sqe->fd);
4297 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07004298 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004299 if (IS_ERR(req->open.filename)) {
4300 ret = PTR_ERR(req->open.filename);
4301 req->open.filename = NULL;
4302 return ret;
4303 }
Pavel Begunkovb9445592021-08-25 12:25:45 +01004304
4305 req->open.file_slot = READ_ONCE(sqe->file_index);
4306 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC))
4307 return -EINVAL;
4308
Jens Axboe4022e7a2020-03-19 19:23:18 -06004309 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004310 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004311 return 0;
4312}
4313
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004314static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4315{
Pavel Begunkovd3fddf62021-08-09 13:04:16 +01004316 u64 mode = READ_ONCE(sqe->len);
4317 u64 flags = READ_ONCE(sqe->open_flags);
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004318
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004319 req->open.how = build_open_how(flags, mode);
4320 return __io_openat_prep(req, sqe);
4321}
4322
Jens Axboecebdb982020-01-08 17:59:24 -07004323static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4324{
4325 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07004326 size_t len;
4327 int ret;
4328
Jens Axboecebdb982020-01-08 17:59:24 -07004329 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4330 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07004331 if (len < OPEN_HOW_SIZE_VER0)
4332 return -EINVAL;
4333
4334 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4335 len);
4336 if (ret)
4337 return ret;
4338
Pavel Begunkovec65fea2020-06-03 18:03:24 +03004339 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07004340}
4341
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004342static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07004343{
4344 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004345 struct file *file;
Pavel Begunkovb9445592021-08-25 12:25:45 +01004346 bool resolve_nonblock, nonblock_set;
4347 bool fixed = !!req->open.file_slot;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004348 int ret;
4349
Jens Axboecebdb982020-01-08 17:59:24 -07004350 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004351 if (ret)
4352 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004353 nonblock_set = op.open_flag & O_NONBLOCK;
4354 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004355 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004356 /*
4357 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
4358		 * it'll always return -EAGAIN
4359 */
4360 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
4361 return -EAGAIN;
4362 op.lookup_flags |= LOOKUP_CACHED;
4363 op.open_flag |= O_NONBLOCK;
4364 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004365
Pavel Begunkovb9445592021-08-25 12:25:45 +01004366 if (!fixed) {
4367 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4368 if (ret < 0)
4369 goto err;
4370 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07004371
4372 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004373 if (IS_ERR(file)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07004374 /*
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004375		 * We could hang on to this 'fd' on retrying, but it seems like
4376 * marginal gain for something that is now known to be a slower
4377 * path. So just put it, and we'll get a new one when we retry.
Jens Axboe3a81fd02020-12-10 12:25:36 -07004378 */
Pavel Begunkovb9445592021-08-25 12:25:45 +01004379 if (!fixed)
4380 put_unused_fd(ret);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004381
4382 ret = PTR_ERR(file);
4383 /* only retry if RESOLVE_CACHED wasn't already set by application */
4384 if (ret == -EAGAIN &&
4385 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
4386 return -EAGAIN;
4387 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07004388 }
4389
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01004390 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
4391 file->f_flags &= ~O_NONBLOCK;
4392 fsnotify_open(file);
Pavel Begunkovb9445592021-08-25 12:25:45 +01004393
4394 if (!fixed)
4395 fd_install(ret, file);
4396 else
4397 ret = io_install_fixed_file(req, file, issue_flags,
4398 req->open.file_slot - 1);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004399err:
4400 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03004401 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004402 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004403 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004404 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07004405 return 0;
4406}
4407
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004408static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07004409{
Pavel Begunkove45cff52021-02-28 22:35:14 +00004410 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07004411}
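
/*
 * Userspace sketch for IORING_OP_OPENAT2 (same liburing assumptions;
 * RESOLVE_CACHED additionally assumes a kernel exporting it in
 * linux/openat2.h):
 *
 *	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.bin", &how);
 *	io_uring_submit(&ring);
 *
 * cqe->res is the new descriptor or -errno. RESOLVE_CACHED matches the
 * LOOKUP_CACHED fast path above; a nonzero sqe->file_index instead
 * installs the file into the fixed-file table at slot file_index - 1.
 */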
4412
Jens Axboe067524e2020-03-02 16:32:28 -07004413static int io_remove_buffers_prep(struct io_kiocb *req,
4414 const struct io_uring_sqe *sqe)
4415{
4416 struct io_provide_buf *p = &req->pbuf;
4417 u64 tmp;
4418
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004419 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4420 sqe->splice_fd_in)
Jens Axboe067524e2020-03-02 16:32:28 -07004421 return -EINVAL;
4422
4423 tmp = READ_ONCE(sqe->fd);
4424 if (!tmp || tmp > USHRT_MAX)
4425 return -EINVAL;
4426
4427 memset(p, 0, sizeof(*p));
4428 p->nbufs = tmp;
4429 p->bgid = READ_ONCE(sqe->buf_group);
4430 return 0;
4431}
4432
4433static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
4434 int bgid, unsigned nbufs)
4435{
4436 unsigned i = 0;
4437
4438 /* shouldn't happen */
4439 if (!nbufs)
4440 return 0;
4441
4442 /* the head kbuf is the list itself */
4443 while (!list_empty(&buf->list)) {
4444 struct io_buffer *nxt;
4445
4446 nxt = list_first_entry(&buf->list, struct io_buffer, list);
4447 list_del(&nxt->list);
4448 kfree(nxt);
4449 if (++i == nbufs)
4450 return i;
Ye Bin2d447d32021-11-22 10:47:37 +08004451 cond_resched();
Jens Axboe067524e2020-03-02 16:32:28 -07004452 }
4453 i++;
4454 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004455 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07004456
4457 return i;
4458}
4459
Pavel Begunkov889fca72021-02-10 00:03:09 +00004460static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07004461{
4462 struct io_provide_buf *p = &req->pbuf;
4463 struct io_ring_ctx *ctx = req->ctx;
4464 struct io_buffer *head;
4465 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004466 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07004467
4468 io_ring_submit_lock(ctx, !force_nonblock);
4469
4470 lockdep_assert_held(&ctx->uring_lock);
4471
4472 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004473 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07004474 if (head)
4475 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07004476 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004477 req_set_fail(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00004478
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004479 /* complete before unlock, IOPOLL may need the lock */
4480 __io_req_complete(req, issue_flags, ret, 0);
4481 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07004482 return 0;
4483}
4484
Jens Axboeddf0322d2020-02-23 16:41:33 -07004485static int io_provide_buffers_prep(struct io_kiocb *req,
4486 const struct io_uring_sqe *sqe)
4487{
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004488 unsigned long size, tmp_check;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004489 struct io_provide_buf *p = &req->pbuf;
4490 u64 tmp;
4491
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004492 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004493 return -EINVAL;
4494
4495 tmp = READ_ONCE(sqe->fd);
4496 if (!tmp || tmp > USHRT_MAX)
4497 return -E2BIG;
4498 p->nbufs = tmp;
4499 p->addr = READ_ONCE(sqe->addr);
4500 p->len = READ_ONCE(sqe->len);
4501
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004502 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4503 &size))
4504 return -EOVERFLOW;
4505 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4506 return -EOVERFLOW;
4507
Pavel Begunkovd81269f2021-03-19 10:21:19 +00004508 size = (unsigned long)p->len * p->nbufs;
4509 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07004510 return -EFAULT;
4511
4512 p->bgid = READ_ONCE(sqe->buf_group);
4513 tmp = READ_ONCE(sqe->off);
4514 if (tmp > USHRT_MAX)
4515 return -E2BIG;
4516 p->bid = tmp;
4517 return 0;
4518}
4519
4520static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4521{
4522 struct io_buffer *buf;
4523 u64 addr = pbuf->addr;
4524 int i, bid = pbuf->bid;
4525
4526 for (i = 0; i < pbuf->nbufs; i++) {
Jens Axboe9990da92021-09-24 07:39:08 -06004527 buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004528 if (!buf)
4529 break;
4530
4531 buf->addr = addr;
Thadeu Lima de Souza Cascardod1f82802021-05-05 09:47:06 -03004532 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004533 buf->bid = bid;
4534 addr += pbuf->len;
4535 bid++;
4536 if (!*head) {
4537 INIT_LIST_HEAD(&buf->list);
4538 *head = buf;
4539 } else {
4540 list_add_tail(&buf->list, &(*head)->list);
4541 }
Eric Dumazetc718ea42022-02-14 20:10:03 -08004542 cond_resched();
Jens Axboeddf0322d2020-02-23 16:41:33 -07004543 }
4544
4545 return i ? i : -ENOMEM;
4546}
4547
Pavel Begunkov889fca72021-02-10 00:03:09 +00004548static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004549{
4550 struct io_provide_buf *p = &req->pbuf;
4551 struct io_ring_ctx *ctx = req->ctx;
4552 struct io_buffer *head, *list;
4553 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004554 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004555
4556 io_ring_submit_lock(ctx, !force_nonblock);
4557
4558 lockdep_assert_held(&ctx->uring_lock);
4559
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004560 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004561
4562 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004563 if (ret >= 0 && !list) {
Pavel Begunkovfa304062022-08-04 15:13:46 +01004564 ret = xa_insert(&ctx->io_buffers, p->bgid, head,
4565 GFP_KERNEL_ACCOUNT);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004566 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004567 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004568 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004569 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004570 req_set_fail(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004571 /* complete before unlock, IOPOLL may need the lock */
4572 __io_req_complete(req, issue_flags, ret, 0);
4573 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004574 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004575}
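
/*
 * Userspace sketch for IORING_OP_PROVIDE_BUFFERS plus a buffer-select
 * receive (same liburing assumptions; sockfd is a placeholder):
 *
 *	static char bufs[8][4096];
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *
 * On completion, cqe->flags & IORING_CQE_F_BUFFER is set and the chosen
 * buffer id is cqe->flags >> IORING_CQE_BUFFER_SHIFT; the group can be
 * torn down with io_uring_prep_remove_buffers(sqe, 8, 1).
 */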
4576
Jens Axboe3e4827b2020-01-08 15:18:09 -07004577static int io_epoll_ctl_prep(struct io_kiocb *req,
4578 const struct io_uring_sqe *sqe)
4579{
4580#if defined(CONFIG_EPOLL)
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004581 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004582 return -EINVAL;
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004583 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004584 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004585
4586 req->epoll.epfd = READ_ONCE(sqe->fd);
4587 req->epoll.op = READ_ONCE(sqe->len);
4588 req->epoll.fd = READ_ONCE(sqe->off);
4589
4590 if (ep_op_has_event(req->epoll.op)) {
4591 struct epoll_event __user *ev;
4592
4593 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4594 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4595 return -EFAULT;
4596 }
4597
4598 return 0;
4599#else
4600 return -EOPNOTSUPP;
4601#endif
4602}
4603
Pavel Begunkov889fca72021-02-10 00:03:09 +00004604static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004605{
4606#if defined(CONFIG_EPOLL)
4607 struct io_epoll *ie = &req->epoll;
4608 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004609 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004610
4611 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4612 if (force_nonblock && ret == -EAGAIN)
4613 return -EAGAIN;
4614
4615 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004616 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004617 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004618 return 0;
4619#else
4620 return -EOPNOTSUPP;
4621#endif
4622}
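
/*
 * Userspace sketch for IORING_OP_EPOLL_CTL (same liburing assumptions;
 * epfd/sockfd are placeholders):
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_epoll_ctl(sqe, epfd, sockfd, EPOLL_CTL_ADD, &ev);
 *	io_uring_submit(&ring);
 *
 * The epoll_event is copied at prep time (see io_epoll_ctl_prep() above),
 * so 'ev' may live on the stack.
 */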
4623
Jens Axboec1ca7572019-12-25 22:18:28 -07004624static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4625{
4626#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004627 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
Jens Axboec1ca7572019-12-25 22:18:28 -07004628 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004629 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4630 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004631
4632 req->madvise.addr = READ_ONCE(sqe->addr);
4633 req->madvise.len = READ_ONCE(sqe->len);
4634 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4635 return 0;
4636#else
4637 return -EOPNOTSUPP;
4638#endif
4639}
4640
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004641static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004642{
4643#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4644 struct io_madvise *ma = &req->madvise;
4645 int ret;
4646
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004647 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004648 return -EAGAIN;
4649
Minchan Kim0726b012020-10-17 16:14:50 -07004650 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004651 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004652 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004653 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004654 return 0;
4655#else
4656 return -EOPNOTSUPP;
4657#endif
4658}
4659
Jens Axboe4840e412019-12-25 22:03:45 -07004660static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4661{
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004662 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
Jens Axboe4840e412019-12-25 22:03:45 -07004663 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004664 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4665 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004666
4667 req->fadvise.offset = READ_ONCE(sqe->off);
4668 req->fadvise.len = READ_ONCE(sqe->len);
4669 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4670 return 0;
4671}
4672
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004673static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004674{
4675 struct io_fadvise *fa = &req->fadvise;
4676 int ret;
4677
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004678 if (issue_flags & IO_URING_F_NONBLOCK) {
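		/*
		 * These advice types only update readahead state and should
		 * not block; anything else (e.g. POSIX_FADV_WILLNEED) may do
		 * IO, so punt it to async context.
		 */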
Jens Axboe3e694262020-02-01 09:22:49 -07004679 switch (fa->advice) {
4680 case POSIX_FADV_NORMAL:
4681 case POSIX_FADV_RANDOM:
4682 case POSIX_FADV_SEQUENTIAL:
4683 break;
4684 default:
4685 return -EAGAIN;
4686 }
4687 }
Jens Axboe4840e412019-12-25 22:03:45 -07004688
4689 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4690 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004691 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004692 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004693 return 0;
4694}
4695
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004696static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4697{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004698 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004699 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004700 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004701 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004702 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004703 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004704
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004705 req->statx.dfd = READ_ONCE(sqe->fd);
4706 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004707 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004708 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4709 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004710
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004711 return 0;
4712}
4713
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004714static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004715{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004716 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004717 int ret;
4718
Pavel Begunkov59d70012021-03-22 01:58:30 +00004719 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004720 return -EAGAIN;
4721
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004722 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4723 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004724
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004725 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004726 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004727 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004728 return 0;
4729}
4730
Jens Axboeb5dba592019-12-11 14:02:38 -07004731static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4732{
Jens Axboe14587a462020-09-05 11:36:08 -06004733 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004734 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004735 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004736 sqe->rw_flags || sqe->buf_index)
Jens Axboeb5dba592019-12-11 14:02:38 -07004737 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004738 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004739 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004740
4741 req->close.fd = READ_ONCE(sqe->fd);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004742 req->close.file_slot = READ_ONCE(sqe->file_index);
4743 if (req->close.file_slot && req->close.fd)
4744 return -EINVAL;
4745
Jens Axboeb5dba592019-12-11 14:02:38 -07004746 return 0;
4747}
4748
Pavel Begunkov889fca72021-02-10 00:03:09 +00004749static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004750{
Jens Axboe9eac1902021-01-19 15:50:37 -07004751 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004752 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004753 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004754 struct file *file = NULL;
4755 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004756
Pavel Begunkov7df778b2021-09-24 20:04:29 +01004757 if (req->close.file_slot) {
4758 ret = io_close_fixed(req, issue_flags);
4759 goto err;
4760 }
4761
Jens Axboe9eac1902021-01-19 15:50:37 -07004762 spin_lock(&files->file_lock);
4763 fdt = files_fdtable(files);
4764 if (close->fd >= fdt->max_fds) {
4765 spin_unlock(&files->file_lock);
4766 goto err;
4767 }
4768 file = fdt->fd[close->fd];
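	/* reject a bogus fd; closing an io_uring fd via io_uring isn't supported */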
Pavel Begunkova1fde922021-04-11 01:46:28 +01004769 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004770 spin_unlock(&files->file_lock);
4771 file = NULL;
4772 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004773 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004774
4775 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004776 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004777 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004778 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004779 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004780
Jens Axboe9eac1902021-01-19 15:50:37 -07004781 ret = __close_fd_get_file(close->fd, &file);
4782 spin_unlock(&files->file_lock);
4783 if (ret < 0) {
4784 if (ret == -ENOENT)
4785 ret = -EBADF;
4786 goto err;
4787 }
4788
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004789 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004790 ret = filp_close(file, current->files);
4791err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004792 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004793 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004794 if (file)
4795 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004796 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004797 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004798}
4799
Pavel Begunkov1155c762021-02-18 18:29:38 +00004800static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004801{
4802 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004803
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004804 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4805 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01004806 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4807 sqe->splice_fd_in))
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004808 return -EINVAL;
4809
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004810 req->sync.off = READ_ONCE(sqe->off);
4811 req->sync.len = READ_ONCE(sqe->len);
4812 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004813 return 0;
4814}
4815
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004816static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004817{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004818 int ret;
4819
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004820 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004821 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004822 return -EAGAIN;
4823
Jens Axboe9adbd452019-12-20 08:45:55 -07004824 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004825 req->sync.flags);
4826 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004827 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004828 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004829 return 0;
4830}
4831
YueHaibing469956e2020-03-04 15:53:52 +08004832#if defined(CONFIG_NET)
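/*
 * Whether a short transfer is worth retrying: only when the caller asked
 * for MSG_WAITALL, and only on stream/seqpacket sockets where the rest of
 * the data can still arrive; a retry on a datagram socket could consume a
 * different message.
 */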
Jens Axboe3c1a3d02022-04-20 19:21:36 -06004833static bool io_net_retry(struct socket *sock, int flags)
4834{
4835 if (!(flags & MSG_WAITALL))
4836 return false;
4837 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
4838}
4839
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004840static int io_setup_async_msg(struct io_kiocb *req,
4841 struct io_async_msghdr *kmsg)
4842{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004843 struct io_async_msghdr *async_msg = req->async_data;
4844
4845 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004846 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004847 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004848 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004849 return -ENOMEM;
4850 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004851 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004852 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004853 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkovf9dc33f2022-09-29 22:23:18 +01004854 if (async_msg->msg.msg_name)
4855 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004856	/* if we're using fast_iov, set it to the new one */
Stefan Metzmacher2e4c95a2022-09-29 09:39:10 +02004857 if (!kmsg->free_iov) {
4858 size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
4859 async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
4860 }
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004861
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004862 return -EAGAIN;
4863}
4864
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004865static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4866 struct io_async_msghdr *iomsg)
4867{
Jens Axboe34a7e502023-06-23 07:38:14 -06004868 struct io_sr_msg *sr = &req->sr_msg;
4869 int ret;
4870
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004871 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004872 iomsg->free_iov = iomsg->fast_iov;
Jens Axboe34a7e502023-06-23 07:38:14 -06004873 ret = sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004874 req->sr_msg.msg_flags, &iomsg->free_iov);
Jens Axboe34a7e502023-06-23 07:38:14 -06004875 /* save msg_control as sys_sendmsg() overwrites it */
4876 sr->msg_control = iomsg->msg.msg_control;
4877 return ret;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004878}
4879
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004880static int io_sendmsg_prep_async(struct io_kiocb *req)
4881{
4882 int ret;
4883
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004884 ret = io_sendmsg_copy_hdr(req, req->async_data);
4885 if (!ret)
4886 req->flags |= REQ_F_NEED_CLEANUP;
4887 return ret;
4888}
4889
Jens Axboe3529d8c2019-12-19 18:24:38 -07004890static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004891{
Jens Axboee47293f2019-12-20 08:58:21 -07004892 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004893
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004894 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4895 return -EINVAL;
Jens Axboe50fefe52022-06-30 14:42:05 -06004898	if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
4899		return -EINVAL;
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004900
Pavel Begunkov270a5942020-07-12 20:41:04 +03004901 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004902 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004903 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4904 if (sr->msg_flags & MSG_DONTWAIT)
4905 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004906
Jens Axboed8768362020-02-27 14:17:49 -07004907#ifdef CONFIG_COMPAT
4908 if (req->ctx->compat)
4909 sr->msg_flags |= MSG_CMSG_COMPAT;
4910#endif
Jens Axboe3c1a3d02022-04-20 19:21:36 -06004911 sr->done_io = 0;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004912 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004913}
4914
Pavel Begunkov889fca72021-02-10 00:03:09 +00004915static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004916{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004917 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe3c1a3d02022-04-20 19:21:36 -06004918 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004919 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004920 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004921 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004922 int ret;
4923
Florent Revestdba4a922020-12-04 12:36:04 +01004924 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004925 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004926 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004927
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004928 kmsg = req->async_data;
4929 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004930 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004931 if (ret)
4932 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004933 kmsg = &iomsg;
Jens Axboe34a7e502023-06-23 07:38:14 -06004934 } else {
4935 kmsg->msg.msg_control = sr->msg_control;
Jens Axboefddafac2020-01-04 20:19:44 -07004936 }
4937
Pavel Begunkov04411802021-04-01 15:44:00 +01004938 flags = req->sr_msg.msg_flags;
4939 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004940 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004941 if (flags & MSG_WAITALL)
4942 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4943
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004944 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004945
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00004946 if (ret < min_ret) {
4947 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
4948 return io_setup_async_msg(req, kmsg);
4949 if (ret == -ERESTARTSYS)
4950 ret = -EINTR;
Jens Axboe3c1a3d02022-04-20 19:21:36 -06004951 if (ret > 0 && io_net_retry(sock, flags)) {
4952 sr->done_io += ret;
4953 req->flags |= REQ_F_PARTIAL_IO;
4954 return io_setup_async_msg(req, kmsg);
4955 }
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00004956 req_set_fail(req);
4957 }
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004958 /* fast path, check for non-NULL to avoid function call */
4959 if (kmsg->free_iov)
4960 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004961 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe3c1a3d02022-04-20 19:21:36 -06004962 if (ret >= 0)
4963 ret += sr->done_io;
4964 else if (sr->done_io)
4965 ret = sr->done_io;
Pavel Begunkov889fca72021-02-10 00:03:09 +00004966 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004967 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004968}
4969
Pavel Begunkov889fca72021-02-10 00:03:09 +00004970static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004971{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004972 struct io_sr_msg *sr = &req->sr_msg;
4973 struct msghdr msg;
4974 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004975 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004976 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004977 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004978 int ret;
4979
Florent Revestdba4a922020-12-04 12:36:04 +01004980 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004981 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004982 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004983
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004984 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4985 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004986 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004987
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004988 msg.msg_name = NULL;
4989 msg.msg_control = NULL;
4990 msg.msg_controllen = 0;
4991 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004992
Pavel Begunkov04411802021-04-01 15:44:00 +01004993 flags = req->sr_msg.msg_flags;
4994 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004995 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004996 if (flags & MSG_WAITALL)
4997 min_ret = iov_iter_count(&msg.msg_iter);
4998
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004999 msg.msg_flags = flags;
5000 ret = sock_sendmsg(sock, &msg);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005001 if (ret < min_ret) {
5002 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
5003 return -EAGAIN;
5004 if (ret == -ERESTARTSYS)
5005 ret = -EINTR;
Jens Axboe3c1a3d02022-04-20 19:21:36 -06005006 if (ret > 0 && io_net_retry(sock, flags)) {
5007 sr->len -= ret;
5008 sr->buf += ret;
5009 sr->done_io += ret;
5010 req->flags |= REQ_F_PARTIAL_IO;
5011 return -EAGAIN;
5012 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005013 req_set_fail(req);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005014 }
Jens Axboe3c1a3d02022-04-20 19:21:36 -06005015 if (ret >= 0)
5016 ret += sr->done_io;
5017 else if (sr->done_io)
5018 ret = sr->done_io;
Pavel Begunkov889fca72021-02-10 00:03:09 +00005019 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07005020 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07005021}
5022
Pavel Begunkov1400e692020-07-12 20:41:05 +03005023static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
5024 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07005025{
5026 struct io_sr_msg *sr = &req->sr_msg;
5027 struct iovec __user *uiov;
5028 size_t iov_len;
5029 int ret;
5030
Pavel Begunkov1400e692020-07-12 20:41:05 +03005031 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
5032 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005033 if (ret)
5034 return ret;
5035
5036 if (req->flags & REQ_F_BUFFER_SELECT) {
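		/*
		 * With provided buffers only a single iovec is allowed; its
		 * length caps the transfer, while the actual buffer is picked
		 * from the buffer group at receive time.
		 */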
5037 if (iov_len > 1)
5038 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00005039 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07005040 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00005041 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005042 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005043 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005044 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02005045 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005046 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02005047 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005048 if (ret > 0)
5049 ret = 0;
5050 }
5051
5052 return ret;
5053}
5054
5055#ifdef CONFIG_COMPAT
5056static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03005057 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07005058{
Jens Axboe52de1fe2020-02-27 10:15:42 -07005059 struct io_sr_msg *sr = &req->sr_msg;
5060 struct compat_iovec __user *uiov;
5061 compat_uptr_t ptr;
5062 compat_size_t len;
5063 int ret;
5064
Pavel Begunkov4af34172021-04-11 01:46:30 +01005065 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
5066 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005067 if (ret)
5068 return ret;
5069
5070 uiov = compat_ptr(ptr);
5071 if (req->flags & REQ_F_BUFFER_SELECT) {
5072 compat_ssize_t clen;
5073
5074 if (len > 1)
5075 return -EINVAL;
5076 if (!access_ok(uiov, sizeof(*uiov)))
5077 return -EFAULT;
5078 if (__get_user(clen, &uiov->iov_len))
5079 return -EFAULT;
5080 if (clen < 0)
5081 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00005082 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005083 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005084 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005085 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02005086 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005087 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02005088 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005089 if (ret < 0)
5090 return ret;
5091 }
5092
5093 return 0;
5094}
Jens Axboe03b12302019-12-02 18:50:25 -07005095#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07005096
Pavel Begunkov1400e692020-07-12 20:41:05 +03005097static int io_recvmsg_copy_hdr(struct io_kiocb *req,
5098 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07005099{
Pavel Begunkov1400e692020-07-12 20:41:05 +03005100 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005101
5102#ifdef CONFIG_COMPAT
5103 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03005104 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005105#endif
5106
Pavel Begunkov1400e692020-07-12 20:41:05 +03005107 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07005108}
5109
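/*
 * Pick a buffer from group sr->bgid for an IOSQE_BUFFER_SELECT receive;
 * the chosen kbuf is stashed in the request and its id is reported back
 * through the CQE flags on completion.
 */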
Jens Axboebcda7ba2020-02-23 16:42:51 -07005110static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005111 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07005112{
5113 struct io_sr_msg *sr = &req->sr_msg;
5114 struct io_buffer *kbuf;
5115
Jens Axboebcda7ba2020-02-23 16:42:51 -07005116 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
5117 if (IS_ERR(kbuf))
5118 return kbuf;
5119
5120 sr->kbuf = kbuf;
5121 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005122 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07005123}
5124
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005125static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
5126{
5127 return io_put_kbuf(req, req->sr_msg.kbuf);
5128}
5129
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005130static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07005131{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005132 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07005133
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005134 ret = io_recvmsg_copy_hdr(req, req->async_data);
5135 if (!ret)
5136 req->flags |= REQ_F_NEED_CLEANUP;
5137 return ret;
5138}
5139
5140static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5141{
5142 struct io_sr_msg *sr = &req->sr_msg;
5143
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03005144 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5145 return -EINVAL;
Jens Axboe50fefe52022-06-30 14:42:05 -06005148	if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
5149		return -EINVAL;
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03005150
Pavel Begunkov270a5942020-07-12 20:41:04 +03005151 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07005152 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005153 sr->bgid = READ_ONCE(sqe->buf_group);
David Lamparter7e8cd202023-03-06 13:23:06 -07005154 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov04411802021-04-01 15:44:00 +01005155 if (sr->msg_flags & MSG_DONTWAIT)
5156 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07005157
Jens Axboed8768362020-02-27 14:17:49 -07005158#ifdef CONFIG_COMPAT
5159 if (req->ctx->compat)
5160 sr->msg_flags |= MSG_CMSG_COMPAT;
5161#endif
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005162 sr->done_io = 0;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005163 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07005164}
5165
Pavel Begunkov889fca72021-02-10 00:03:09 +00005166static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07005167{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005168 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005169 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005170 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005171 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005172 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005173 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07005174 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005175 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005176
Florent Revestdba4a922020-12-04 12:36:04 +01005177 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005178 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005179 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005180
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005181 kmsg = req->async_data;
5182 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005183 ret = io_recvmsg_copy_hdr(req, &iomsg);
5184 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03005185 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005186 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005187 }
5188
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005189 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005190 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005191 if (IS_ERR(kbuf))
5192 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005193 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00005194 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
5195 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005196 1, req->sr_msg.len);
5197 }
5198
Pavel Begunkov04411802021-04-01 15:44:00 +01005199 flags = req->sr_msg.msg_flags;
5200 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005201 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005202 if (flags & MSG_WAITALL)
5203 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
5204
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005205 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5206 kmsg->uaddr, flags);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005207 if (ret < min_ret) {
5208 if (ret == -EAGAIN && force_nonblock)
5209 return io_setup_async_msg(req, kmsg);
5210 if (ret == -ERESTARTSYS)
5211 ret = -EINTR;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005212 if (ret > 0 && io_net_retry(sock, flags)) {
5213 sr->done_io += ret;
Jens Axboe390b8812022-03-23 09:30:05 -06005214 req->flags |= REQ_F_PARTIAL_IO;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005215 return io_setup_async_msg(req, kmsg);
5216 }
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005217 req_set_fail(req);
5218 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
5219 req_set_fail(req);
5220 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005221
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005222 if (req->flags & REQ_F_BUFFER_SELECTED)
5223 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005224 /* fast path, check for non-NULL to avoid function call */
5225 if (kmsg->free_iov)
5226 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005227 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005228 if (ret >= 0)
5229 ret += sr->done_io;
5230 else if (sr->done_io)
5231 ret = sr->done_io;
Pavel Begunkov889fca72021-02-10 00:03:09 +00005232 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06005233 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005234}
5235
Pavel Begunkov889fca72021-02-10 00:03:09 +00005236static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07005237{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03005238 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005239 struct io_sr_msg *sr = &req->sr_msg;
5240 struct msghdr msg;
5241 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07005242 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005243 struct iovec iov;
5244 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005245 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005246 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005247 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005248
Florent Revestdba4a922020-12-04 12:36:04 +01005249 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005250 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01005251 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07005252
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03005253 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005254 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07005255 if (IS_ERR(kbuf))
5256 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005257 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07005258 }
5259
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005260 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03005261 if (unlikely(ret))
5262 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07005263
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005264 msg.msg_name = NULL;
5265 msg.msg_control = NULL;
5266 msg.msg_controllen = 0;
5267 msg.msg_namelen = 0;
5268 msg.msg_iocb = NULL;
5269 msg.msg_flags = 0;
5270
Pavel Begunkov04411802021-04-01 15:44:00 +01005271 flags = req->sr_msg.msg_flags;
5272 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005273 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01005274 if (flags & MSG_WAITALL)
5275 min_ret = iov_iter_count(&msg.msg_iter);
5276
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03005277 ret = sock_recvmsg(sock, &msg, flags);
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005278 if (ret < min_ret) {
5279 if (ret == -EAGAIN && force_nonblock)
5280 return -EAGAIN;
5281 if (ret == -ERESTARTSYS)
5282 ret = -EINTR;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005283 if (ret > 0 && io_net_retry(sock, flags)) {
5284 sr->len -= ret;
5285 sr->buf += ret;
5286 sr->done_io += ret;
Jens Axboe390b8812022-03-23 09:30:05 -06005287 req->flags |= REQ_F_PARTIAL_IO;
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005288 return -EAGAIN;
5289 }
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005290 req_set_fail(req);
5291 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
Alviro Iskandar Setiawane944f1e2022-02-07 21:05:33 +07005292out_free:
Pavel Begunkovcdc68e72021-11-23 00:07:47 +00005293 req_set_fail(req);
5294 }
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03005295 if (req->flags & REQ_F_BUFFER_SELECTED)
5296 cflags = io_put_recv_kbuf(req);
Jens Axboe9b7b0f22023-01-21 10:21:22 -07005297 if (ret >= 0)
5298 ret += sr->done_io;
5299 else if (sr->done_io)
5300 ret = sr->done_io;
Pavel Begunkov889fca72021-02-10 00:03:09 +00005301 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07005302 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07005303}
5304
Jens Axboe3529d8c2019-12-19 18:24:38 -07005305static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005306{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005307 struct io_accept *accept = &req->accept;
5308
Jens Axboe14587a462020-09-05 11:36:08 -06005309 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06005310 return -EINVAL;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005311 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06005312 return -EINVAL;
5313
Jens Axboed55e5f52019-12-11 16:12:15 -07005314 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5315 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005316 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06005317 accept->nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005318
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005319 accept->file_slot = READ_ONCE(sqe->file_index);
Jens Axboe13239762022-03-14 17:26:19 -06005320 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC))
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005321 return -EINVAL;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005322 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
5323 return -EINVAL;
5324 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
5325 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005326 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005327}
Jens Axboe17f2fe32019-10-17 14:42:58 -06005328
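/*
 * Accept a connection, either installing a normal fd for it or, if a
 * file_index was given, registering the file directly into the fixed
 * file table slot (file_slot - 1).
 */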
Pavel Begunkov889fca72021-02-10 00:03:09 +00005329static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005330{
5331 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005332 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005333 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005334 bool fixed = !!accept->file_slot;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005335 struct file *file;
5336 int ret, fd;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005337
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005338 if (!fixed) {
5339 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
5340 if (unlikely(fd < 0))
5341 return fd;
5342 }
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005343 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
5344 accept->flags);
5345 if (IS_ERR(file)) {
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005346 if (!fixed)
5347 put_unused_fd(fd);
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005348 ret = PTR_ERR(file);
Dylan Yudaken30b90682023-01-21 09:13:12 -07005349 /* safe to retry */
5350 req->flags |= REQ_F_PARTIAL_IO;
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005351 if (ret == -EAGAIN && force_nonblock)
5352 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005353 if (ret == -ERESTARTSYS)
5354 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005355 req_set_fail(req);
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005356 } else if (!fixed) {
Pavel Begunkova7083ad2021-08-25 12:25:46 +01005357 fd_install(fd, file);
5358 ret = fd;
Pavel Begunkovaaa4db12021-08-25 12:25:47 +01005359 } else {
5360 ret = io_install_fixed_file(req, file, issue_flags,
5361 accept->file_slot - 1);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03005362 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00005363 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06005364 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005365}
5366
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005367static int io_connect_prep_async(struct io_kiocb *req)
5368{
5369 struct io_async_connect *io = req->async_data;
5370 struct io_connect *conn = &req->connect;
5371
5372 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
5373}
5374
Jens Axboe3529d8c2019-12-19 18:24:38 -07005375static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07005376{
Jens Axboe3529d8c2019-12-19 18:24:38 -07005377 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07005378
Jens Axboe14587a462020-09-05 11:36:08 -06005379 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005380 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01005381 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5382 sqe->splice_fd_in)
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005383 return -EINVAL;
5384
Jens Axboe3529d8c2019-12-19 18:24:38 -07005385 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5386 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005387 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07005388}
5389
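/*
 * Issue a connect. The address is kept on the stack for the fast path;
 * only if the socket can't connect immediately (-EAGAIN/-EINPROGRESS under
 * nonblock) is it copied into ->async_data so the retry can reuse it.
 */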
Pavel Begunkov889fca72021-02-10 00:03:09 +00005390static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07005391{
Jens Axboee8c2bc12020-08-15 18:44:09 -07005392 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005393 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005394 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005395 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005396
Jens Axboee8c2bc12020-08-15 18:44:09 -07005397 if (req->async_data) {
5398 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07005399 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07005400 ret = move_addr_to_kernel(req->connect.addr,
5401 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07005402 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07005403 if (ret)
5404 goto out;
5405 io = &__io;
5406 }
5407
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005408 file_flags = force_nonblock ? O_NONBLOCK : 0;
5409
Jens Axboee8c2bc12020-08-15 18:44:09 -07005410 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07005411 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07005412 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07005413 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07005414 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005415 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07005416 ret = -ENOMEM;
5417 goto out;
5418 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07005419 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07005420 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07005421 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07005422 if (ret == -ERESTARTSYS)
5423 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07005424out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005425 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005426 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005427 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005428 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005429}
YueHaibing469956e2020-03-04 15:53:52 +08005430#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07005431#define IO_NETOP_FN(op) \
5432static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5433{ \
5434 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07005435}
5436
Jens Axboe99a10082021-02-19 09:35:19 -07005437#define IO_NETOP_PREP(op) \
5438IO_NETOP_FN(op) \
5439static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5440{ \
5441 return -EOPNOTSUPP; \
5442} \
5443
5444#define IO_NETOP_PREP_ASYNC(op) \
5445IO_NETOP_PREP(op) \
5446static int io_##op##_prep_async(struct io_kiocb *req) \
5447{ \
5448 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08005449}
5450
Jens Axboe99a10082021-02-19 09:35:19 -07005451IO_NETOP_PREP_ASYNC(sendmsg);
5452IO_NETOP_PREP_ASYNC(recvmsg);
5453IO_NETOP_PREP_ASYNC(connect);
5454IO_NETOP_PREP(accept);
5455IO_NETOP_FN(send);
5456IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08005457#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06005458
Jens Axboed7718a92020-02-14 22:23:12 -07005459struct io_poll_table {
5460 struct poll_table_struct pt;
5461 struct io_kiocb *req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005462 int nr_entries;
Jens Axboed7718a92020-02-14 22:23:12 -07005463 int error;
5464};
5465
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005466#define IO_POLL_CANCEL_FLAG BIT(31)
Pavel Begunkov4b702b72022-12-02 14:27:14 +00005467#define IO_POLL_RETRY_FLAG BIT(30)
5468#define IO_POLL_REF_MASK GENMASK(29, 0)
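/*
 * ->poll_refs layout: bit 31 flags cancellation, bit 30 asks the handler
 * to re-check for events, and the low 30 bits count ownership references.
 */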
5469
5470/*
5471 * We usually have 1-2 refs taken; 128 is more than enough, and we want to
5472 * maximise the margin between this amount and the point where it overflows.
5473 */
5474#define IO_POLL_REF_BIAS 128
5475
5476static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
5477{
5478 int v;
5479
5480 /*
5481 * poll_refs are already elevated and we don't have much hope for
5482 * grabbing the ownership. Instead of incrementing set a retry flag
5483 * to notify the loop that there might have been some change.
5484 */
5485 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
5486 if (v & IO_POLL_REF_MASK)
5487 return false;
5488 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5489}
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005490
5491/*
5492 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request
5493 * is free and we can bump the count to acquire ownership. Modifying a
5494 * request while not owning it is disallowed, which prevents races when
5495 * enqueueing task_work and between arming poll and wakeups.
5496 */
5497static inline bool io_poll_get_ownership(struct io_kiocb *req)
5498{
Pavel Begunkov4b702b72022-12-02 14:27:14 +00005499 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
5500 return io_poll_get_ownership_slowpath(req);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005501 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
5502}
5503
5504static void io_poll_mark_cancelled(struct io_kiocb *req)
5505{
5506 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
5507}
5508
Pavel Begunkova85d7ac2022-08-29 14:30:15 +01005509static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
5510{
5511 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
5512 if (req->opcode == IORING_OP_POLL_ADD)
5513 return req->async_data;
5514 return req->apoll->double_poll;
5515}
5516
5517static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
5518{
5519 if (req->opcode == IORING_OP_POLL_ADD)
5520 return &req->poll;
5521 return &req->apoll->poll;
5522}
5523
5524static void io_poll_req_insert(struct io_kiocb *req)
5525{
5526 struct io_ring_ctx *ctx = req->ctx;
5527 struct hlist_head *list;
5528
5529 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5530 hlist_add_head(&req->hash_node, list);
5531}
5532
5533static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5534 wait_queue_func_t wake_func)
5535{
5536 poll->head = NULL;
Pavel Begunkova85d7ac2022-08-29 14:30:15 +01005537#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5538 /* mask in events that we always want/need */
5539 poll->events = events | IO_POLL_UNMASK;
5540 INIT_LIST_HEAD(&poll->wait.entry);
5541 init_waitqueue_func_entry(&poll->wait, wake_func);
5542}
5543
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005544static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
Jens Axboed7718a92020-02-14 22:23:12 -07005545{
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005546 struct wait_queue_head *head = smp_load_acquire(&poll->head);
Jens Axboed7718a92020-02-14 22:23:12 -07005547
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005548 if (head) {
5549 spin_lock_irq(&head->lock);
5550 list_del_init(&poll->wait.entry);
5551 poll->head = NULL;
5552 spin_unlock_irq(&head->lock);
5553 }
Jens Axboed7718a92020-02-14 22:23:12 -07005554}
5555
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005556static void io_poll_remove_entries(struct io_kiocb *req)
5557{
5558 struct io_poll_iocb *poll = io_poll_get_single(req);
5559 struct io_poll_iocb *poll_double = io_poll_get_double(req);
5560
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005561 /*
5562 * While we hold the waitqueue lock and the waitqueue is nonempty,
5563 * wake_up_pollfree() will wait for us. However, taking the waitqueue
5564 * lock in the first place can race with the waitqueue being freed.
5565 *
5566 * We solve this as eventpoll does: by taking advantage of the fact that
5567 * all users of wake_up_pollfree() will RCU-delay the actual free. If
5568 * we enter rcu_read_lock() and see that the pointer to the queue is
5569 * non-NULL, we can then lock it without the memory being freed out from
5570 * under us.
5571 *
5572 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
5573 * case the caller deletes the entry from the queue, leaving it empty.
5574 * In that case, only RCU prevents the queue memory from being freed.
5575 */
5576 rcu_read_lock();
5577 io_poll_remove_entry(poll);
5578 if (poll_double)
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005579 io_poll_remove_entry(poll_double);
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005580 rcu_read_unlock();
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005581}
5582
5583/*
5584 * All poll tw should go through this. Checks for poll events, manages
5585 * references, does rewait, etc.
5586 *
5587 * Returns a negative error on failure. >0 when no action is required, which
5588 * means either a spurious wakeup or a served multishot CQE. 0 when it's done
5589 * with the request, in which case the mask is stored in req->result.
5590 */
5591static int io_poll_check_events(struct io_kiocb *req)
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005592{
5593 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005594 struct io_poll_iocb *poll = io_poll_get_single(req);
5595 int v;
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005596
Jens Axboe316319e2021-08-19 09:41:42 -06005597 /* req->task == current here, checking PF_EXITING is safe */
Pavel Begunkove09ee512021-07-01 13:26:05 +01005598 if (unlikely(req->task->flags & PF_EXITING))
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005599 io_poll_mark_cancelled(req);
Pavel Begunkove09ee512021-07-01 13:26:05 +01005600
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005601 do {
5602 v = atomic_read(&req->poll_refs);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005603
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005604 /* tw handler should be the owner, and so have some references */
5605 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
5606 return 0;
5607 if (v & IO_POLL_CANCEL_FLAG)
5608 return -ECANCELED;
Pavel Begunkovcd1981a2022-12-02 14:27:12 +00005609 /*
5610 * cqe.res contains only events of the first wake up
5611 * and all others are lost. Redo vfs_poll() to get
5612 * up-to-date state.
5613 */
5614 if ((v & IO_POLL_REF_MASK) != 1)
5615 req->result = 0;
Pavel Begunkov4b702b72022-12-02 14:27:14 +00005616 if (v & IO_POLL_RETRY_FLAG) {
5617 req->result = 0;
5618 /*
5619 * We won't find new events that came in between
5620 * vfs_poll and the ref put unless we clear the
5621 * flag in advance.
5622 */
5623 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
5624 v &= ~IO_POLL_RETRY_FLAG;
5625 }
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005626
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005627 if (!req->result) {
5628 struct poll_table_struct pt = { ._key = poll->events };
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005629
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005630 req->result = vfs_poll(req->file, &pt) & poll->events;
5631 }
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005632
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005633		/* multishot, just fill a CQE and proceed */
5634 if (req->result && !(poll->events & EPOLLONESHOT)) {
5635 __poll_t mask = mangle_poll(req->result & poll->events);
5636 bool filled;
Jens Axboe18bceab2020-05-15 11:56:54 -06005637
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005638 spin_lock(&ctx->completion_lock);
5639 filled = io_fill_cqe_aux(ctx, req->user_data, mask,
5640 IORING_CQE_F_MORE);
5641 io_commit_cqring(ctx);
5642 spin_unlock(&ctx->completion_lock);
5643 if (unlikely(!filled))
5644 return -ECANCELED;
5645 io_cqring_ev_posted(ctx);
5646 } else if (req->result) {
5647 return 0;
5648 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005649
Pavel Begunkov62321dc2022-12-02 14:27:11 +00005650 /* force the next iteration to vfs_poll() */
5651 req->result = 0;
5652
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005653 /*
5654 * Release all references, retry if someone tried to restart
5655 * task_work while we were executing it.
5656 */
Lin Madf4b1772022-12-02 14:27:15 +00005657 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
5658 IO_POLL_REF_MASK);
Jens Axboe18bceab2020-05-15 11:56:54 -06005659
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005660 return 1;
Jens Axboe18bceab2020-05-15 11:56:54 -06005661}
5662
Pavel Begunkovf237c302021-08-18 12:42:46 +01005663static void io_poll_task_func(struct io_kiocb *req, bool *locked)
Jens Axboe18bceab2020-05-15 11:56:54 -06005664{
Jens Axboe6d816e02020-08-11 08:04:14 -06005665 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005666 int ret;
Jens Axboe18bceab2020-05-15 11:56:54 -06005667
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005668 ret = io_poll_check_events(req);
5669 if (ret > 0)
5670 return;
5671
5672 if (!ret) {
5673 req->result = mangle_poll(req->result & req->poll.events);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005674 } else {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005675 req->result = ret;
5676 req_set_fail(req);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01005677 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005678
5679 io_poll_remove_entries(req);
5680 spin_lock(&ctx->completion_lock);
5681 hash_del(&req->hash_node);
5682 spin_unlock(&ctx->completion_lock);
5683 io_req_complete_post(req, req->result, 0);
Jens Axboe18bceab2020-05-15 11:56:54 -06005684}
5685
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005686static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
5687{
5688 struct io_ring_ctx *ctx = req->ctx;
5689 int ret;
5690
5691 ret = io_poll_check_events(req);
5692 if (ret > 0)
5693 return;
5694
5695 io_poll_remove_entries(req);
5696 spin_lock(&ctx->completion_lock);
5697 hash_del(&req->hash_node);
5698 spin_unlock(&ctx->completion_lock);
5699
5700 if (!ret)
5701 io_req_task_submit(req, locked);
5702 else
5703 io_req_complete_failed(req, ret);
5704}
5705
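/*
 * Stash the triggering mask and schedule the matching task_work handler;
 * the caller must already own the request (see io_poll_get_ownership()).
 */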
5706static void __io_poll_execute(struct io_kiocb *req, int mask)
5707{
5708 req->result = mask;
5709 if (req->opcode == IORING_OP_POLL_ADD)
5710 req->io_task_work.func = io_poll_task_func;
5711 else
5712 req->io_task_work.func = io_apoll_task_func;
5713
5714 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
5715 io_req_task_work_add(req);
5716}
5717
5718static inline void io_poll_execute(struct io_kiocb *req, int res)
5719{
5720 if (io_poll_get_ownership(req))
5721 __io_poll_execute(req, res);
5722}
5723
5724static void io_poll_cancel_req(struct io_kiocb *req)
5725{
5726 io_poll_mark_cancelled(req);
5727 /* kick tw, which should complete the request */
5728 io_poll_execute(req, 0);
5729}
5730
5731static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5732 void *key)
Jens Axboe18bceab2020-05-15 11:56:54 -06005733{
5734 struct io_kiocb *req = wait->private;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005735 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
5736 wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005737 __poll_t mask = key_to_poll(key);
5738
Pavel Begunkove9d7ca02022-08-29 14:30:24 +01005739 if (unlikely(mask & POLLFREE)) {
5740 io_poll_mark_cancelled(req);
5741 /* we have to kick tw in case it's not already */
5742 io_poll_execute(req, 0);
5743
5744 /*
5745		 * If the waitqueue is being freed early but someone already
5746 * holds ownership over it, we have to tear down the request as
5747 * best we can. That means immediately removing the request from
5748 * its waitqueue and preventing all further accesses to the
5749 * waitqueue via the request.
5750 */
5751 list_del_init(&poll->wait.entry);
5752
5753 /*
5754 * Careful: this *must* be the last step, since as soon
5755 * as req->head is NULL'ed out, the request can be
5756 * completed and freed, since aio_poll_complete_work()
5757 * will no longer need to take the waitqueue lock.
5758 */
5759 smp_store_release(&poll->head, NULL);
5760 return 1;
5761 }
5762
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005763 /* for instances that support it check for an event match first */
Jens Axboe18bceab2020-05-15 11:56:54 -06005764 if (mask && !(mask & poll->events))
5765 return 0;
5766
Jens Axboeccf06b52022-12-23 07:04:49 -07005767 if (io_poll_get_ownership(req)) {
5768 /*
5769 * If we trigger a multishot poll off our own wakeup path,
5770 * disable multishot as there is a circular dependency between
5771 * CQ posting and triggering the event.
5772 */
5773 if (mask & EPOLL_URING_WAKE)
5774 poll->events |= EPOLLONESHOT;
5775
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005776 __io_poll_execute(req, mask);
Jens Axboeccf06b52022-12-23 07:04:49 -07005777 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005778 return 1;
5779}
5780
Jens Axboe18bceab2020-05-15 11:56:54 -06005781static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005782 struct wait_queue_head *head,
5783 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005784{
5785 struct io_kiocb *req = pt->req;
5786
5787 /*
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005788 * The file being polled uses multiple waitqueues for poll handling
5789 * (e.g. one for read, one for write). Setup a separate io_poll_iocb
5790 * if this happens.
Jens Axboe18bceab2020-05-15 11:56:54 -06005791 */
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005792 if (unlikely(pt->nr_entries)) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005793 struct io_poll_iocb *first = poll;
Pavel Begunkov58852d42020-10-16 20:55:56 +01005794
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005795 /* double add on the same waitqueue head, ignore */
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005796 if (first->head == head)
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005797 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005798 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005799 if (*poll_ptr) {
Pavel Begunkov23a65db2021-08-17 20:28:11 +01005800 if ((*poll_ptr)->head == head)
5801 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005802 pt->error = -EINVAL;
5803 return;
5804 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005805
Jens Axboe18bceab2020-05-15 11:56:54 -06005806 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5807 if (!poll) {
5808 pt->error = -ENOMEM;
5809 return;
5810 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005811 io_init_poll_iocb(poll, first->events, first->wait.func);
Jens Axboe807abcb2020-07-17 17:09:27 -06005812 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005813 }
5814
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005815 pt->nr_entries++;
Jens Axboe18bceab2020-05-15 11:56:54 -06005816 poll->head = head;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005817 poll->wait.private = req;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005818
5819 if (poll->events & EPOLLEXCLUSIVE)
5820 add_wait_queue_exclusive(head, &poll->wait);
5821 else
5822 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005823}
5824
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005825static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5826 struct poll_table_struct *p)
5827{
5828 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5829
5830 __io_queue_proc(&pt->req->poll, pt, head,
5831 (struct io_poll_iocb **) &pt->req->async_data);
5832}
5833
5834static int __io_arm_poll_handler(struct io_kiocb *req,
5835 struct io_poll_iocb *poll,
5836 struct io_poll_table *ipt, __poll_t mask)
5837{
5838 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005839
5840 INIT_HLIST_NODE(&req->hash_node);
5841 io_init_poll_iocb(poll, mask, io_poll_wake);
5842 poll->file = req->file;
5843 poll->wait.private = req;
5844
5845 ipt->pt._key = mask;
5846 ipt->req = req;
5847 ipt->error = 0;
5848 ipt->nr_entries = 0;
5849
5850 /*
5851	 * Take ownership to delay any tw execution until we're done
5852	 * with poll arming; see io_poll_get_ownership().
5853 */
5854 atomic_set(&req->poll_refs, 1);
5855 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5856
5857 if (mask && (poll->events & EPOLLONESHOT)) {
5858 io_poll_remove_entries(req);
5859 /* no one else has access to the req, forget about the ref */
5860 return mask;
5861 }
5862 if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
5863 io_poll_remove_entries(req);
5864 if (!ipt->error)
5865 ipt->error = -EINVAL;
5866 return 0;
5867 }
5868
5869 spin_lock(&ctx->completion_lock);
5870 io_poll_req_insert(req);
5871 spin_unlock(&ctx->completion_lock);
5872
5873 if (mask) {
5874 /* can't multishot if failed, just queue the event we've got */
Pavel Begunkov182dc3a2022-08-29 14:30:23 +01005875 if (unlikely(ipt->error || !ipt->nr_entries)) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005876 poll->events |= EPOLLONESHOT;
Pavel Begunkov182dc3a2022-08-29 14:30:23 +01005877 ipt->error = 0;
5878 }
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005879 __io_poll_execute(req, mask);
5880 return 0;
5881 }
5882
5883 /*
Pavel Begunkov1d588492022-12-02 14:27:13 +00005884 * Try to release ownership. If we see a change of state, e.g.
5885	 * the poll was woken up, queue a tw; it'll deal with it.
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005886 */
Pavel Begunkov1d588492022-12-02 14:27:13 +00005887 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005888 __io_poll_execute(req, 0);
5889 return 0;
5890}
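
/*
 * A minimal userspace model of the poll_refs ownership handoff used
 * above, in C11 atomics. It is illustrative only: the kernel folds
 * extra flag bits into the same word, which this model omits.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	// whoever bumps refs from zero owns the request and must process
 *	// it; later wakeups only record that more work is pending
 *	static bool model_get_ownership(atomic_int *refs)
 *	{
 *		return atomic_fetch_add(refs, 1) == 0;
 *	}
 *
 *	// mirrors the atomic_cmpxchg(&req->poll_refs, 1, 0) above:
 *	// returns true if ownership was dropped with nothing pending
 *	static bool model_put_ownership(atomic_int *refs)
 *	{
 *		int expected = 1;
 *		return atomic_compare_exchange_strong(refs, &expected, 0);
 *	}
 */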
5891
Jens Axboe18bceab2020-05-15 11:56:54 -06005892static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5893 struct poll_table_struct *p)
5894{
5895 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005896 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005897
Jens Axboe807abcb2020-07-17 17:09:27 -06005898 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005899}
5900
Olivier Langlois59b735a2021-06-22 05:17:39 -07005901enum {
5902 IO_APOLL_OK,
5903 IO_APOLL_ABORTED,
5904 IO_APOLL_READY
5905};
5906
Jens Axboe345fb362023-03-06 13:28:57 -07005907/*
5908 * We can't reliably detect loops where a poll trigger is repeatedly
5909 * followed by a failing issue. Rather than fail these immediately,
5910 * allow a certain number of retries before we give up. Given that this
5911 * condition should _rarely_ trigger even once, a larger value is fine.
5912 */
5913#define APOLL_MAX_RETRY 128
5914
Olivier Langlois59b735a2021-06-22 05:17:39 -07005915static int io_arm_poll_handler(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005916{
5917 const struct io_op_def *def = &io_op_defs[req->opcode];
5918 struct io_ring_ctx *ctx = req->ctx;
5919 struct async_poll *apoll;
5920 struct io_poll_table ipt;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005921 __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
5922 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07005923
5924 if (!req->file || !file_can_poll(req->file))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005925 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005926 if (!def->pollin && !def->pollout)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005927 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005928
5929 if (def->pollin) {
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005930 mask |= POLLIN | POLLRDNORM;
5931
5932 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5933 if ((req->opcode == IORING_OP_RECVMSG) &&
5934 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5935 mask &= ~POLLIN;
5936 } else {
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005937 mask |= POLLOUT | POLLWRNORM;
5938 }
5939
Pavel Begunkov124fb132023-01-22 10:24:20 -07005940 if (req->flags & REQ_F_POLLED) {
Jens Axboea79b13f2023-01-21 10:39:22 -07005941 apoll = req->apoll;
Pavel Begunkov124fb132023-01-22 10:24:20 -07005942 kfree(apoll->double_poll);
Jens Axboe345fb362023-03-06 13:28:57 -07005943 if (unlikely(!--apoll->poll.retries)) {
5944 apoll->double_poll = NULL;
5945 return IO_APOLL_ABORTED;
5946 }
Pavel Begunkov124fb132023-01-22 10:24:20 -07005947 } else {
Jens Axboea79b13f2023-01-21 10:39:22 -07005948 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
Fedor Pchelkinf4ba5542023-03-16 21:56:16 +03005949 if (unlikely(!apoll))
5950 return IO_APOLL_ABORTED;
Jens Axboe345fb362023-03-06 13:28:57 -07005951 apoll->poll.retries = APOLL_MAX_RETRY;
Pavel Begunkov124fb132023-01-22 10:24:20 -07005952 }
Jens Axboe807abcb2020-07-17 17:09:27 -06005953 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005954 req->apoll = apoll;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005955 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005956 ipt.pt._qproc = io_async_queue_proc;
5957
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005958 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
Hao Xu41a51692021-08-12 15:47:02 +08005959 if (ret || ipt.error)
5960 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5961
Olivier Langlois236daeae2021-05-31 02:36:37 -04005962 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5963 mask, apoll->poll.events);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005964 return IO_APOLL_OK;
Jens Axboed7718a92020-02-14 22:23:12 -07005965}
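
/*
 * Userspace never arms this path explicitly; any pollable request that
 * can't complete inline takes it. A sketch that would typically
 * exercise apoll, assuming an initialized `ring' and a connected
 * socket `sock' with no data pending:
 *
 *	char buf[512];
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sock, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 *	// rather than punting to io-wq, the kernel arms poll on `sock';
 *	// when data arrives the recv is retried and a CQE is posted
 */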
5966
Jens Axboe76e1b642020-09-26 15:05:03 -06005967/*
5968 * Returns true if we found and killed one or more poll requests
5969 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005970static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005971 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005972{
Jens Axboe78076bb2019-12-04 19:56:40 -07005973 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005974 struct io_kiocb *req;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005975 bool found = false;
5976 int i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005977
Jens Axboe79ebeae2021-08-10 15:18:27 -06005978 spin_lock(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005979 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5980 struct hlist_head *list;
5981
5982 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005983 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005984 if (io_match_task_safe(req, tsk, cancel_all)) {
Jens Axboe7524ec52022-08-29 14:30:20 +01005985 hlist_del_init(&req->hash_node);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005986 io_poll_cancel_req(req);
5987 found = true;
5988 }
Jens Axboef3606e32020-09-22 08:18:24 -06005989 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005990 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06005991 spin_unlock(&ctx->completion_lock);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01005992 return found;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005993}
5994
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005995static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5996 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005997 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005998{
Jens Axboe78076bb2019-12-04 19:56:40 -07005999 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07006000 struct io_kiocb *req;
6001
Jens Axboe78076bb2019-12-04 19:56:40 -07006002 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
6003 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07006004 if (sqe_addr != req->user_data)
6005 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01006006 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
6007 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06006008 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07006009 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06006010 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07006011}
6012
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006013static bool io_poll_disarm(struct io_kiocb *req)
6014 __must_hold(&ctx->completion_lock)
6015{
6016 if (!io_poll_get_ownership(req))
6017 return false;
6018 io_poll_remove_entries(req);
6019 hash_del(&req->hash_node);
6020 return true;
6021}
6022
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01006023static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
6024 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01006025 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06006026{
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006027 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06006028
Jens Axboeb2cb8052021-03-17 08:17:19 -06006029 if (!req)
6030 return -ENOENT;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006031 io_poll_cancel_req(req);
6032 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07006033}
6034
Pavel Begunkov9096af32021-04-14 13:38:36 +01006035static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
6036 unsigned int flags)
6037{
6038 u32 events;
6039
6040 events = READ_ONCE(sqe->poll32_events);
6041#ifdef __BIG_ENDIAN
6042 events = swahw32(events);
6043#endif
6044 if (!(flags & IORING_POLL_ADD_MULTI))
6045 events |= EPOLLONESHOT;
6046 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
6047}
6048
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006049static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07006050 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07006051{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006052 struct io_poll_update *upd = &req->poll_update;
6053 u32 flags;
6054
Jens Axboe221c5eb2019-01-17 09:41:58 -07006055 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6056 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006057 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006058 return -EINVAL;
6059 flags = READ_ONCE(sqe->len);
6060 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
6061 IORING_POLL_ADD_MULTI))
6062 return -EINVAL;
6063 /* meaningless without update */
6064 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07006065 return -EINVAL;
6066
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006067 upd->old_user_data = READ_ONCE(sqe->addr);
6068 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
6069 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07006070
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006071 upd->new_user_data = READ_ONCE(sqe->off);
6072 if (!upd->update_user_data && upd->new_user_data)
6073 return -EINVAL;
6074 if (upd->update_events)
6075 upd->events = io_poll_parse_events(sqe, flags);
6076 else if (sqe->poll32_events)
6077 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07006078
Jens Axboe221c5eb2019-01-17 09:41:58 -07006079 return 0;
6080}
6081
Jens Axboe3529d8c2019-12-19 18:24:38 -07006082static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07006083{
6084 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006085 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07006086
6087 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6088 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006089 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07006090 return -EINVAL;
6091 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006092 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07006093 return -EINVAL;
6094
Pavel Begunkov48dcd382021-08-15 10:40:18 +01006095 io_req_set_refcount(req);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006096 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07006097 return 0;
6098}
6099
Pavel Begunkov61e98202021-02-10 00:03:08 +00006100static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07006101{
6102 struct io_poll_iocb *poll = &req->poll;
Jens Axboe0969e782019-12-17 18:40:57 -07006103 struct io_poll_table ipt;
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006104 int ret;
Jens Axboe0969e782019-12-17 18:40:57 -07006105
Jens Axboed7718a92020-02-14 22:23:12 -07006106 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06006107
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006108 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
Pavel Begunkov6c7259c2022-08-29 14:30:22 +01006109 if (!ret && ipt.error)
6110 req_set_fail(req);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006111 ret = ret ?: ipt.error;
6112 if (ret)
6113 __io_req_complete(req, issue_flags, ret, 0);
6114 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07006115}
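
/*
 * Userspace view of the oneshot and multishot variants prepped above,
 * via liburing (a sketch; assumes an initialized `ring' and a pollable
 * `fd'):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// oneshot: a single CQE, then the request is done
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data64(sqe, 0x1234);
 *
 *	// multishot: sets IORING_POLL_ADD_MULTI in sqe->len, so
 *	// io_poll_parse_events() above won't force EPOLLONESHOT
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data64(sqe, 0x5678);
 *
 *	io_uring_submit(&ring);
 *	// multishot CQEs carry IORING_CQE_F_MORE while the poll is armed
 */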
6116
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006117static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06006118{
6119 struct io_ring_ctx *ctx = req->ctx;
6120 struct io_kiocb *preq;
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006121 int ret2, ret = 0;
Jens Axboeb69de282021-03-17 08:37:41 -06006122
Jens Axboe0e388fc2023-06-16 21:12:06 -06006123 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
6124
Jens Axboe79ebeae2021-08-10 15:18:27 -06006125 spin_lock(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01006126 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006127 if (!preq || !io_poll_disarm(preq)) {
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006128 spin_unlock(&ctx->completion_lock);
Pavel Begunkovf770fba2022-08-29 14:30:18 +01006129 ret = preq ? -EALREADY : -ENOENT;
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006130 goto out;
Jens Axboeb69de282021-03-17 08:37:41 -06006131 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06006132 spin_unlock(&ctx->completion_lock);
Jens Axboecb3b200e2021-04-06 09:49:31 -06006133
Pavel Begunkov040e58f2022-08-29 14:30:14 +01006134 if (req->poll_update.update_events || req->poll_update.update_user_data) {
6135		/* only replace the event mask, keep the behavior flags */
6136 if (req->poll_update.update_events) {
6137 preq->poll.events &= ~0xffff;
6138 preq->poll.events |= req->poll_update.events & 0xffff;
6139 preq->poll.events |= IO_POLL_UNMASK;
6140 }
6141 if (req->poll_update.update_user_data)
6142 preq->user_data = req->poll_update.new_user_data;
6143
6144 ret2 = io_poll_add(preq, issue_flags);
6145 /* successfully updated, don't complete poll request */
6146 if (!ret2)
6147 goto out;
6148 }
6149 req_set_fail(preq);
6150 io_req_complete(preq, -ECANCELED);
6151out:
6152 if (ret < 0)
6153 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06006154 /* complete update request, we're done with it */
6155 io_req_complete(req, ret);
Jens Axboe0e388fc2023-06-16 21:12:06 -06006156 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Jens Axboeb69de282021-03-17 08:37:41 -06006157 return 0;
6158}
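
/*
 * A matching userspace sketch for the update path (newer liburing; the
 * old user_data refers to an armed poll such as the multishot one in
 * the previous example):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// retarget the poll armed with user_data 0x5678 to POLLOUT and
 *	// give it new user_data, without completing it
 *	io_uring_prep_poll_update(sqe, 0x5678, 0x9abc, POLLOUT,
 *				  IORING_POLL_UPDATE_EVENTS |
 *				  IORING_POLL_UPDATE_USER_DATA);
 *	io_uring_submit(&ring);
 *	// the update CQE is 0 on success, -ENOENT if nothing matched,
 *	// -EALREADY if the target raced with a wakeup
 */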
6159
Pavel Begunkovf237c302021-08-18 12:42:46 +01006160static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
Jens Axboe89850fc2021-08-10 15:11:51 -06006161{
Jens Axboe89850fc2021-08-10 15:11:51 -06006162 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006163 io_req_complete_post(req, -ETIME, 0);
Jens Axboe89850fc2021-08-10 15:11:51 -06006164}
6165
Jens Axboe5262f562019-09-17 12:26:57 -06006166static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
6167{
Jens Axboead8a48a2019-11-15 08:49:11 -07006168 struct io_timeout_data *data = container_of(timer,
6169 struct io_timeout_data, timer);
6170 struct io_kiocb *req = data->req;
6171 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06006172 unsigned long flags;
6173
Jens Axboe89850fc2021-08-10 15:11:51 -06006174 spin_lock_irqsave(&ctx->timeout_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01006175 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03006176 atomic_set(&req->ctx->cq_timeouts,
6177 atomic_read(&req->ctx->cq_timeouts) + 1);
Jens Axboe89850fc2021-08-10 15:11:51 -06006178 spin_unlock_irqrestore(&ctx->timeout_lock, flags);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03006179
Jens Axboe89850fc2021-08-10 15:11:51 -06006180 req->io_task_work.func = io_req_task_timeout;
6181 io_req_task_work_add(req);
Jens Axboe5262f562019-09-17 12:26:57 -06006182 return HRTIMER_NORESTART;
6183}
6184
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006185static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
6186 __u64 user_data)
Jens Axboe89850fc2021-08-10 15:11:51 -06006187 __must_hold(&ctx->timeout_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07006188{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006189 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06006190 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006191 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06006192
6193 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006194 found = user_data == req->user_data;
6195 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06006196 break;
Jens Axboef254ac02020-08-12 17:33:30 -06006197 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006198 if (!found)
6199 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06006200
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006201 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01006202 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006203 return ERR_PTR(-EALREADY);
6204 list_del_init(&req->timeout.list);
6205 return req;
6206}
6207
6208static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006209 __must_hold(&ctx->completion_lock)
Jens Axboe89850fc2021-08-10 15:11:51 -06006210 __must_hold(&ctx->timeout_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006211{
6212 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6213
6214 if (IS_ERR(req))
6215 return PTR_ERR(req);
6216
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006217 req_set_fail(req);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01006218 io_fill_cqe_req(req, -ECANCELED, 0);
Pavel Begunkov91c2f692021-08-11 19:28:28 +01006219 io_put_req_deferred(req);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00006220 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06006221}
6222
Jens Axboe50c1df22021-08-27 17:11:06 -06006223static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
6224{
6225 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
6226 case IORING_TIMEOUT_BOOTTIME:
6227 return CLOCK_BOOTTIME;
6228 case IORING_TIMEOUT_REALTIME:
6229 return CLOCK_REALTIME;
6230 default:
6231 /* can't happen, vetted at prep time */
6232 WARN_ON_ONCE(1);
6233 fallthrough;
6234 case 0:
6235 return CLOCK_MONOTONIC;
6236 }
6237}
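
/*
 * The clock is selected per request by timeout flags at prep time. A
 * userspace sketch (assumes an initialized `ring' and a 5.15+ kernel
 * for the clock flags):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 30 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// measure against CLOCK_BOOTTIME so suspended time counts too
 *	io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_BOOTTIME);
 *	io_uring_submit(&ring);
 */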
6238
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006239static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6240 struct timespec64 *ts, enum hrtimer_mode mode)
6241 __must_hold(&ctx->timeout_lock)
6242{
6243 struct io_timeout_data *io;
6244 struct io_kiocb *req;
6245 bool found = false;
6246
6247 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) {
6248 found = user_data == req->user_data;
6249 if (found)
6250 break;
6251 }
6252 if (!found)
6253 return -ENOENT;
6254
6255 io = req->async_data;
6256 if (hrtimer_try_to_cancel(&io->timer) == -1)
6257 return -EALREADY;
6258 hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
6259 io->timer.function = io_link_timeout_fn;
6260 hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
6261 return 0;
6262}
6263
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006264static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
6265 struct timespec64 *ts, enum hrtimer_mode mode)
Jens Axboe89850fc2021-08-10 15:11:51 -06006266 __must_hold(&ctx->timeout_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006267{
6268 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
6269 struct io_timeout_data *data;
6270
6271 if (IS_ERR(req))
6272 return PTR_ERR(req);
6273
6274 req->timeout.off = 0; /* noseq */
6275 data = req->async_data;
6276 list_add_tail(&req->timeout.list, &ctx->timeout_list);
Jens Axboe50c1df22021-08-27 17:11:06 -06006277 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006278 data->timer.function = io_timeout_fn;
6279 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6280 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07006281}
6282
Jens Axboe3529d8c2019-12-19 18:24:38 -07006283static int io_timeout_remove_prep(struct io_kiocb *req,
6284 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07006285{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006286 struct io_timeout_rem *tr = &req->timeout_rem;
6287
Jens Axboeb29472e2019-12-17 18:50:29 -07006288 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
6289 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006290 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6291 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006292 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
Jens Axboeb29472e2019-12-17 18:50:29 -07006293 return -EINVAL;
6294
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006295 tr->ltimeout = false;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006296 tr->addr = READ_ONCE(sqe->addr);
6297 tr->flags = READ_ONCE(sqe->timeout_flags);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006298 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
6299 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
6300 return -EINVAL;
6301 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
6302 tr->ltimeout = true;
6303 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006304 return -EINVAL;
6305 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6306 return -EFAULT;
6307 } else if (tr->flags) {
6308 /* timeout removal doesn't support flags */
6309 return -EINVAL;
6310 }
6311
Jens Axboeb29472e2019-12-17 18:50:29 -07006312 return 0;
6313}
6314
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006315static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
6316{
6317 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
6318 : HRTIMER_MODE_REL;
6319}
6320
Jens Axboe11365042019-10-16 09:08:32 -06006321/*
6322 * Remove or update an existing timeout command
6323 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00006324static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06006325{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006326 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06006327 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006328 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06006329
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006330 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
6331 spin_lock(&ctx->completion_lock);
6332 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00006333 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006334 spin_unlock_irq(&ctx->timeout_lock);
6335 spin_unlock(&ctx->completion_lock);
6336 } else {
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006337 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
6338
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006339 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkovf1042b62021-08-28 19:54:39 -06006340 if (tr->ltimeout)
6341 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6342 else
6343 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
Pavel Begunkovec3c3d02021-08-18 10:50:52 +01006344 spin_unlock_irq(&ctx->timeout_lock);
6345 }
Jens Axboe11365042019-10-16 09:08:32 -06006346
Jens Axboe4e88d6e2019-12-07 20:59:47 -07006347 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006348 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006349 io_req_complete_post(req, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06006350 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06006351}
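
/*
 * Userspace sketch of the two branches above, assuming an initialized
 * `ring' and a pending timeout that was armed with user_data 0xdead:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 60 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// cancel branch: the victim timeout completes with -ECANCELED
 *	io_uring_prep_timeout_remove(sqe, 0xdead, 0);
 *
 *	// update branch: push its deadline out to 60s instead
 *	// io_uring_prep_timeout_update(sqe, &ts, 0xdead, 0);
 *
 *	io_uring_submit(&ring);
 */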
6352
Jens Axboe3529d8c2019-12-19 18:24:38 -07006353static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07006354 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06006355{
Jens Axboead8a48a2019-11-15 08:49:11 -07006356 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06006357 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006358 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06006359
Jens Axboead8a48a2019-11-15 08:49:11 -07006360 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06006361 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006362 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6363 sqe->splice_fd_in)
Jens Axboea41525a2019-10-15 16:48:15 -06006364 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03006365 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07006366 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06006367 flags = READ_ONCE(sqe->timeout_flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006368 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK))
6369 return -EINVAL;
6370 /* more than one clock specified is invalid, obviously */
6371 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
Jens Axboe5262f562019-09-17 12:26:57 -06006372 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06006373
Pavel Begunkovef9dd632021-08-28 19:54:38 -06006374 INIT_LIST_HEAD(&req->timeout.list);
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006375 req->timeout.off = off;
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01006376 if (unlikely(off && !req->ctx->off_timeout_used))
6377 req->ctx->off_timeout_used = true;
Jens Axboe26a61672019-12-20 09:02:01 -07006378
Jens Axboee8c2bc12020-08-15 18:44:09 -07006379 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07006380 return -ENOMEM;
6381
Jens Axboee8c2bc12020-08-15 18:44:09 -07006382 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006383 data->req = req;
Jens Axboe50c1df22021-08-27 17:11:06 -06006384 data->flags = flags;
Jens Axboead8a48a2019-11-15 08:49:11 -07006385
6386 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06006387 return -EFAULT;
6388
Pavel Begunkov8662dae2021-01-19 13:32:44 +00006390 data->mode = io_translate_timeout_mode(flags);
Jens Axboe50c1df22021-08-27 17:11:06 -06006391 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006392
6393 if (is_timeout_link) {
6394 struct io_submit_link *link = &req->ctx->submit_state.link;
6395
6396 if (!link->head)
6397 return -EINVAL;
6398 if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
6399 return -EINVAL;
Pavel Begunkov4d13d1a2021-08-15 10:40:24 +01006400 req->timeout.head = link->last;
6401 link->last->flags |= REQ_F_ARM_LTIMEOUT;
Pavel Begunkovb97e7362021-08-15 10:40:23 +01006402 }
Jens Axboead8a48a2019-11-15 08:49:11 -07006403 return 0;
6404}
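
/*
 * The is_timeout_link branch above serves IORING_OP_LINK_TIMEOUT,
 * which must immediately follow the request it bounds. A userspace
 * sketch (assumes an initialized `ring' and a socket `sock'):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 5 };
 *	char buf[512];
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sock, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_IO_LINK;	// timeout bounds this recv
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *	io_uring_submit(&ring);
 *	// if the recv hasn't finished within 5s it completes -ECANCELED
 */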
6405
Pavel Begunkov61e98202021-02-10 00:03:08 +00006406static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07006407{
Jens Axboead8a48a2019-11-15 08:49:11 -07006408 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006409 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07006410 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006411 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07006412
Jens Axboe89850fc2021-08-10 15:11:51 -06006413 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07006414
Jens Axboe5262f562019-09-17 12:26:57 -06006415 /*
6416	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07006417	 * timeout event to be satisfied. If it isn't set, then this is
6418	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06006419 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006420 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07006421 entry = ctx->timeout_list.prev;
6422 goto add;
6423 }
Jens Axboe5262f562019-09-17 12:26:57 -06006424
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006425 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
6426 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06006427
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05006428 /* Update the last seq here in case io_flush_timeouts() hasn't.
6429	 * This is safe because ->timeout_lock is held, and submissions
6430	 * and completions are never mixed in the same ->timeout_lock section.
6431 */
6432 ctx->cq_last_tm_flush = tail;
6433
Jens Axboe5262f562019-09-17 12:26:57 -06006434 /*
6435 * Insertion sort, ensuring the first entry in the list is always
6436 * the one we need first.
6437 */
Jens Axboe5262f562019-09-17 12:26:57 -06006438 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006439 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
6440 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06006441
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03006442 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07006443 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03006444 /* nxt.seq is behind @tail, otherwise would've been completed */
6445 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06006446 break;
6447 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07006448add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03006449 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07006450 data->timer.function = io_timeout_fn;
6451 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe89850fc2021-08-10 15:11:51 -06006452 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06006453 return 0;
6454}
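
/*
 * Userspace sketch of the sequence ("off") semantics above: a timeout
 * may also be satisfied by a completion count (assumes an initialized
 * `ring'):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// fires once 8 CQEs have been posted, or after 1s, whichever
 *	// comes first; res is 0 for the count case, -ETIME for the timer
 *	io_uring_prep_timeout(sqe, &ts, 8, 0);
 *	io_uring_submit(&ring);
 */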
6455
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006456struct io_cancel_data {
6457 struct io_ring_ctx *ctx;
6458 u64 user_data;
6459};
6460
Jens Axboe62755e32019-10-28 21:49:21 -06006461static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06006462{
Jens Axboe62755e32019-10-28 21:49:21 -06006463 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006464 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06006465
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006466 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06006467}
6468
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006469static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
6470 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06006471{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006472 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06006473 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06006474 int ret = 0;
6475
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006476 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07006477 return -ENOENT;
6478
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006479 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06006480 switch (cancel_ret) {
6481 case IO_WQ_CANCEL_OK:
6482 ret = 0;
6483 break;
6484 case IO_WQ_CANCEL_RUNNING:
6485 ret = -EALREADY;
6486 break;
6487 case IO_WQ_CANCEL_NOTFOUND:
6488 ret = -ENOENT;
6489 break;
6490 }
6491
Jens Axboee977d6d2019-11-05 12:39:45 -07006492 return ret;
6493}
6494
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006495static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
Jens Axboe47f46762019-11-09 17:43:02 -07006496{
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006497 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07006498 int ret;
6499
Pavel Begunkovdadebc32021-08-23 13:30:44 +01006500 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006501
Pavel Begunkovf458dd842021-03-08 12:14:14 +00006502 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01006503 if (ret != -ENOENT)
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006504 return ret;
Pavel Begunkov505657b2021-08-17 20:28:09 +01006505
6506 spin_lock(&ctx->completion_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006507 spin_lock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006508 ret = io_timeout_cancel(ctx, sqe_addr);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006509 spin_unlock_irq(&ctx->timeout_lock);
Jens Axboe47f46762019-11-09 17:43:02 -07006510 if (ret != -ENOENT)
Pavel Begunkov505657b2021-08-17 20:28:09 +01006511 goto out;
6512 ret = io_poll_cancel(ctx, sqe_addr, false);
6513out:
6514 spin_unlock(&ctx->completion_lock);
6515 return ret;
Jens Axboe47f46762019-11-09 17:43:02 -07006516}
6517
Jens Axboe3529d8c2019-12-19 18:24:38 -07006518static int io_async_cancel_prep(struct io_kiocb *req,
6519 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07006520{
Jens Axboefbf23842019-12-17 18:45:56 -07006521 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07006522 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06006523 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6524 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006525 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6526 sqe->splice_fd_in)
Jens Axboee977d6d2019-11-05 12:39:45 -07006527 return -EINVAL;
6528
Jens Axboefbf23842019-12-17 18:45:56 -07006529 req->cancel.addr = READ_ONCE(sqe->addr);
6530 return 0;
6531}
6532
Pavel Begunkov61e98202021-02-10 00:03:08 +00006533static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07006534{
6535 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006536 u64 sqe_addr = req->cancel.addr;
6537 struct io_tctx_node *node;
6538 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07006539
Pavel Begunkov8cb01fa2021-08-15 10:40:22 +01006540 ret = io_try_cancel_userdata(req, sqe_addr);
Pavel Begunkov58f99372021-03-12 16:25:55 +00006541 if (ret != -ENOENT)
6542 goto done;
Pavel Begunkov58f99372021-03-12 16:25:55 +00006543
6544	/* slow path, try all io-wqs */
6545 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
6546 ret = -ENOENT;
6547 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
6548 struct io_uring_task *tctx = node->task->io_uring;
6549
Pavel Begunkov58f99372021-03-12 16:25:55 +00006550 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6551 if (ret != -ENOENT)
6552 break;
6553 }
6554 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkov58f99372021-03-12 16:25:55 +00006555done:
Pavel Begunkov58f99372021-03-12 16:25:55 +00006556 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006557 req_set_fail(req);
Pavel Begunkov505657b2021-08-17 20:28:09 +01006558 io_req_complete_post(req, ret, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06006559 return 0;
6560}
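
/*
 * Userspace sketch of IORING_OP_ASYNC_CANCEL (newer liburing spells
 * the u64 variant io_uring_prep_cancel64(); assumes an initialized
 * `ring' and an in-flight request armed with user_data 0xbeef):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel64(sqe, 0xbeef, 0);
 *	io_uring_submit(&ring);
 *	// cancel CQE res: 0 if cancelled, -ENOENT if nothing matched,
 *	// -EALREADY if the target already ran and may still complete
 */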
6561
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006562static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07006563 const struct io_uring_sqe *sqe)
6564{
Daniele Albano61710e42020-07-18 14:15:16 -06006565 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
6566 return -EINVAL;
Pavel Begunkov26578cd2021-08-20 10:36:37 +01006567 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006568 return -EINVAL;
6569
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006570 req->rsrc_update.offset = READ_ONCE(sqe->off);
6571 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6572 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006573 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006574 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006575 return 0;
6576}
6577
Pavel Begunkov889fca72021-02-10 00:03:09 +00006578static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006579{
6580 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006581 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006582 int ret;
6583
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006584 up.offset = req->rsrc_update.offset;
6585 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01006586 up.nr = 0;
6587 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01006588 up.resv = 0;
Dylan Yudaken7a7c9f92022-04-12 09:30:40 -07006589 up.resv2 = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006590
Jens Axboecdb31c22021-09-24 08:43:54 -06006591 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Pavel Begunkovfdecb662021-04-25 14:32:20 +01006592 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01006593 &up, req->rsrc_update.nr_args);
Jens Axboecdb31c22021-09-24 08:43:54 -06006594 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
Jens Axboe05f3fb32019-12-09 11:22:50 -07006595
6596 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006597 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00006598 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006599 return 0;
6600}
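
/*
 * Userspace sketch of IORING_OP_FILES_UPDATE: swap entries of a fixed
 * file table from within the ring itself. Assumes an initialized
 * `ring' whose table was registered with io_uring_register_files(),
 * plus a fresh descriptor `newfd':
 *
 *	int fds[1] = { newfd };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// replace registered file slot 3 with newfd (-1 would clear it)
 *	io_uring_prep_files_update(sqe, fds, 1, 3);
 *	io_uring_submit(&ring);
 *	// CQE res is the number of slots updated, or a negative error
 */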
6601
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006602static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07006603{
Jens Axboed625c6e2019-12-17 19:53:05 -07006604 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07006605 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006606 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07006607 case IORING_OP_READV:
6608 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006609 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006610 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006611 case IORING_OP_WRITEV:
6612 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006613 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006614 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006615 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006616 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07006617 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006618 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006619 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006620 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006621 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00006622 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006623 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006624 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006625 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07006626 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07006627 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006628 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07006629 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006630 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006631 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006632 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07006633 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006634 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07006635 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006636 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07006637 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006638 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07006639 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006640 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07006641 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006642 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006643 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006644 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07006645 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006646 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006647 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006648 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006649 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006650 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07006651 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006652 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07006653 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006654 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07006655 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006656 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006657 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006658 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006659 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006660 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006661 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006662 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07006663 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006664 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006665 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006666 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006667 case IORING_OP_SHUTDOWN:
6668 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06006669 case IORING_OP_RENAMEAT:
6670 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06006671 case IORING_OP_UNLINKAT:
6672 return io_unlinkat_prep(req, sqe);
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006673 case IORING_OP_MKDIRAT:
6674 return io_mkdirat_prep(req, sqe);
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006675 case IORING_OP_SYMLINKAT:
6676 return io_symlinkat_prep(req, sqe);
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006677 case IORING_OP_LINKAT:
6678 return io_linkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07006679 }
6680
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006681 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
6682 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01006683 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03006684}
6685
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006686static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006687{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006688 if (!io_op_defs[req->opcode].needs_async_setup)
6689 return 0;
6690 if (WARN_ON_ONCE(req->async_data))
6691 return -EFAULT;
6692 if (io_alloc_async_data(req))
6693 return -EAGAIN;
6694
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006695 switch (req->opcode) {
6696 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006697 return io_rw_prep_async(req, READ);
6698 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006699 return io_rw_prep_async(req, WRITE);
6700 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006701 return io_sendmsg_prep_async(req);
6702 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00006703 return io_recvmsg_prep_async(req);
6704 case IORING_OP_CONNECT:
6705 return io_connect_prep_async(req);
6706 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006707 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
6708 req->opcode);
6709 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07006710}
6711
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006712static u32 io_get_sequence(struct io_kiocb *req)
6713{
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006714 u32 seq = req->ctx->cached_sq_head;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006715
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01006716 /* need original cached_sq_head, but it was increased for each req */
6717 io_for_each_link(req, req)
6718 seq--;
6719 return seq;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006720}
6721
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006722static bool io_drain_req(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07006723{
Pavel Begunkov3c199662021-06-15 16:47:57 +01006724 struct io_kiocb *pos;
Jens Axboedef596e2019-01-09 08:59:42 -07006725 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006726 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07006727 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006728 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07006729
Pavel Begunkovb8ce1b92021-08-31 14:13:11 +01006730 if (req->flags & REQ_F_FAIL) {
6731 io_req_complete_fail_submit(req);
6732 return true;
6733 }
6734
Pavel Begunkov3c199662021-06-15 16:47:57 +01006735 /*
6736 * If we need to drain a request in the middle of a link, drain the
6737 * head request and the next request/link after the current link.
6738	 * Since links execute sequentially, IOSQE_IO_DRAIN will be
6739	 * maintained for every request of our link.
6740 */
6741 if (ctx->drain_next) {
6742 req->flags |= REQ_F_IO_DRAIN;
6743 ctx->drain_next = false;
6744 }
6745 /* not interested in head, start from the first linked */
6746 io_for_each_link(pos, req->link) {
6747 if (pos->flags & REQ_F_IO_DRAIN) {
6748 ctx->drain_next = true;
6749 req->flags |= REQ_F_IO_DRAIN;
6750 break;
6751 }
6752 }
6753
Jens Axboedef596e2019-01-09 08:59:42 -07006754	/* Still need to defer if there are pending reqs in the defer list. */
Hao Xu1bd12b72021-11-25 17:21:02 +08006755 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006756 if (likely(list_empty_careful(&ctx->defer_list) &&
Pavel Begunkov10c66902021-06-15 16:47:56 +01006757 !(req->flags & REQ_F_IO_DRAIN))) {
Hao Xu1bd12b72021-11-25 17:21:02 +08006758 spin_unlock(&ctx->completion_lock);
Pavel Begunkov10c66902021-06-15 16:47:56 +01006759 ctx->drain_active = false;
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006760 return false;
Pavel Begunkov10c66902021-06-15 16:47:56 +01006761 }
Hao Xu1bd12b72021-11-25 17:21:02 +08006762 spin_unlock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006763
6764 seq = io_get_sequence(req);
6765 /* Still a chance to pass the sequence check */
6766 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006767 return false;
Jens Axboedef596e2019-01-09 08:59:42 -07006768
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006769 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006770 if (ret)
Pavel Begunkov1b487732021-07-11 22:41:13 +01006771 goto fail;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006772 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006773 de = kmalloc(sizeof(*de), GFP_KERNEL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006774 if (!de) {
Pavel Begunkov1b487732021-07-11 22:41:13 +01006775 ret = -ENOMEM;
6776fail:
6777 io_req_complete_failed(req, ret);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006778 return true;
6779 }
Jens Axboe31b51512019-01-18 22:56:34 -07006780
Jens Axboe79ebeae2021-08-10 15:18:27 -06006781 spin_lock(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006782 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe79ebeae2021-08-10 15:18:27 -06006783 spin_unlock(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006784 kfree(de);
Pavel Begunkovf237c302021-08-18 12:42:46 +01006785 io_queue_async_work(req, NULL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006786 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006787 }
6788
6789 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006790 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006791 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006792 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe79ebeae2021-08-10 15:18:27 -06006793 spin_unlock(&ctx->completion_lock);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006794 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006795}
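
/*
 * Userspace view of the drain semantics above; fsync-after-writes is
 * the classic use (assumes an initialized `ring' and a file `fd'):
 *
 *	struct io_uring_sqe *sqe;
 *
 *	// ... queue any number of write SQEs here ...
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	sqe->flags |= IOSQE_IO_DRAIN;	// wait for all prior SQEs first
 *	io_uring_submit(&ring);
 */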
6796
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006797static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006798{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006799 if (req->flags & REQ_F_BUFFER_SELECTED) {
6800 switch (req->opcode) {
6801 case IORING_OP_READV:
6802 case IORING_OP_READ_FIXED:
6803 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006804 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006805 break;
6806 case IORING_OP_RECVMSG:
6807 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006808 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006809 break;
6810 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006811 }
6812
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006813 if (req->flags & REQ_F_NEED_CLEANUP) {
6814 switch (req->opcode) {
6815 case IORING_OP_READV:
6816 case IORING_OP_READ_FIXED:
6817 case IORING_OP_READ:
6818 case IORING_OP_WRITEV:
6819 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006820 case IORING_OP_WRITE: {
6821 struct io_async_rw *io = req->async_data;
Pavel Begunkov1dacb4d2021-06-17 18:14:03 +01006822
6823 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006824 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006825 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006826 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006827 case IORING_OP_SENDMSG: {
6828 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006829
6830 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006831 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006832 }
Jens Axboef3cd48502020-09-24 14:55:54 -06006833 case IORING_OP_OPENAT:
6834 case IORING_OP_OPENAT2:
6835 if (req->open.filename)
6836 putname(req->open.filename);
6837 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006838 case IORING_OP_RENAMEAT:
6839 putname(req->rename.oldpath);
6840 putname(req->rename.newpath);
6841 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006842 case IORING_OP_UNLINKAT:
6843 putname(req->unlink.filename);
6844 break;
Dmitry Kadasheve34a02d2021-07-08 13:34:45 +07006845 case IORING_OP_MKDIRAT:
6846 putname(req->mkdir.filename);
6847 break;
Dmitry Kadashev7a8721f2021-07-08 13:34:46 +07006848 case IORING_OP_SYMLINKAT:
6849 putname(req->symlink.oldpath);
6850 putname(req->symlink.newpath);
6851 break;
Dmitry Kadashevcf30da92021-07-08 13:34:47 +07006852 case IORING_OP_LINKAT:
6853 putname(req->hardlink.oldpath);
6854 putname(req->hardlink.newpath);
6855 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006856 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006857 }
Jens Axboe75652a302021-04-15 09:52:40 -06006858 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6859 kfree(req->apoll->double_poll);
6860 kfree(req->apoll);
6861 req->apoll = NULL;
6862 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006863 if (req->flags & REQ_F_INFLIGHT) {
6864 struct io_uring_task *tctx = req->task->io_uring;
6865
6866 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006867 }
Pavel Begunkovc8543572021-06-17 18:14:04 +01006868 if (req->flags & REQ_F_CREDS)
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006869 put_cred(req->creds);
Pavel Begunkovc8543572021-06-17 18:14:04 +01006870
6871 req->flags &= ~IO_REQ_CLEAN_FLAGS;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006872}
6873
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	const struct cred *creds = NULL;
	int ret;

	if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
		creds = override_creds(req->creds);

	switch (req->opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req, issue_flags);
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		ret = io_read(req, issue_flags);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		ret = io_write(req, issue_flags);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, issue_flags);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add(req, issue_flags);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_update(req, issue_flags);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_sync_file_range(req, issue_flags);
		break;
	case IORING_OP_SENDMSG:
		ret = io_sendmsg(req, issue_flags);
		break;
	case IORING_OP_SEND:
		ret = io_send(req, issue_flags);
		break;
	case IORING_OP_RECVMSG:
		ret = io_recvmsg(req, issue_flags);
		break;
	case IORING_OP_RECV:
		ret = io_recv(req, issue_flags);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout(req, issue_flags);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove(req, issue_flags);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept(req, issue_flags);
		break;
	case IORING_OP_CONNECT:
		ret = io_connect(req, issue_flags);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel(req, issue_flags);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate(req, issue_flags);
		break;
	case IORING_OP_OPENAT:
		ret = io_openat(req, issue_flags);
		break;
	case IORING_OP_CLOSE:
		ret = io_close(req, issue_flags);
		break;
	case IORING_OP_FILES_UPDATE:
		ret = io_files_update(req, issue_flags);
		break;
	case IORING_OP_STATX:
		ret = io_statx(req, issue_flags);
		break;
	case IORING_OP_FADVISE:
		ret = io_fadvise(req, issue_flags);
		break;
	case IORING_OP_MADVISE:
		ret = io_madvise(req, issue_flags);
		break;
	case IORING_OP_OPENAT2:
		ret = io_openat2(req, issue_flags);
		break;
	case IORING_OP_EPOLL_CTL:
		ret = io_epoll_ctl(req, issue_flags);
		break;
	case IORING_OP_SPLICE:
		ret = io_splice(req, issue_flags);
		break;
	case IORING_OP_PROVIDE_BUFFERS:
		ret = io_provide_buffers(req, issue_flags);
		break;
	case IORING_OP_REMOVE_BUFFERS:
		ret = io_remove_buffers(req, issue_flags);
		break;
	case IORING_OP_TEE:
		ret = io_tee(req, issue_flags);
		break;
	case IORING_OP_SHUTDOWN:
		ret = io_shutdown(req, issue_flags);
		break;
	case IORING_OP_RENAMEAT:
		ret = io_renameat(req, issue_flags);
		break;
	case IORING_OP_UNLINKAT:
		ret = io_unlinkat(req, issue_flags);
		break;
	case IORING_OP_MKDIRAT:
		ret = io_mkdirat(req, issue_flags);
		break;
	case IORING_OP_SYMLINKAT:
		ret = io_symlinkat(req, issue_flags);
		break;
	case IORING_OP_LINKAT:
		ret = io_linkat(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (creds)
		revert_creds(creds);
	if (ret)
		return ret;
	/* If the op doesn't have a file, we're not polling for it */
	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
		io_iopoll_req_issued(req);

	return 0;
}

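/*
 * io-wq worker entry points. io_wq_free_work() drops the extra request
 * reference held for the worker (see io_wq_submit_work() below) and hands
 * back the next linked request, if any, so a whole link can be run without
 * bouncing through the submission path again.
 */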
static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	req = io_put_req_find_next(req);
	return req ? &req->work : NULL;
}

static void io_wq_submit_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *timeout;
	int ret = 0;

	/* one will be dropped by ->io_free_work() after returning to io-wq */
	if (!(req->flags & REQ_F_REFCOUNT))
		__io_req_set_refcount(req, 2);
	else
		req_ref_get(req);

	timeout = io_prep_linked_timeout(req);
	if (timeout)
		io_queue_linked_timeout(timeout);

	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
	if (work->flags & IO_WQ_WORK_CANCEL)
		ret = -ECANCELED;

	if (!ret) {
		do {
			ret = io_issue_sqe(req, 0);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
				break;
			cond_resched();
		} while (1);
	}

	/* avoid locking problems by failing it from a clean context */
	if (ret)
		io_req_task_queue_fail(req, ret);
}

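/*
 * Fixed (registered) file support. Each table slot packs tag bits into the
 * low bits of the file pointer (see FFS_MASK): whether the file supports
 * non-blocking reads/writes, and whether it's a regular file. That lets
 * io_file_get_fixed() fold the cached state straight into req->flags with
 * a single shift instead of re-querying the file on every request.
 */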
static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
						       unsigned i)
{
	return &table->files[i];
}

static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);

	return (struct file *) (slot->file_ptr & FFS_MASK);
}

static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
{
	unsigned long file_ptr = (unsigned long) file;

	if (__io_file_supports_nowait(file, READ))
		file_ptr |= FFS_ASYNC_READ;
	if (__io_file_supports_nowait(file, WRITE))
		file_ptr |= FFS_ASYNC_WRITE;
	if (S_ISREG(file_inode(file)->i_mode))
		file_ptr |= FFS_ISREG;
	file_slot->file_ptr = file_ptr;
}

static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
					     struct io_kiocb *req, int fd,
					     unsigned int issue_flags)
{
	struct file *file = NULL;
	unsigned long file_ptr;

	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));

	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
		goto out;
	fd = array_index_nospec(fd, ctx->nr_user_files);
	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
	file = (struct file *) (file_ptr & FFS_MASK);
	file_ptr &= ~FFS_MASK;
	/* mask in overlapping REQ_F and FFS bits */
	req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
	io_req_set_rsrc_node(req);
out:
	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
	return file;
}

static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
				       struct io_kiocb *req, int fd)
{
	struct file *file = fget(fd);

	trace_io_uring_file_get(ctx, fd);

	/* we don't allow fixed io_uring files */
	if (file && unlikely(file->f_op == &io_uring_fops))
		io_req_track_inflight(req);
	return file;
}

static inline struct file *io_file_get(struct io_ring_ctx *ctx,
				       struct io_kiocb *req, int fd, bool fixed,
				       unsigned int issue_flags)
{
	if (fixed)
		return io_file_get_fixed(ctx, req, fd, issue_flags);
	else
		return io_file_get_normal(ctx, req, fd);
}

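/*
 * Linked timeout handling. The timeout fires from hrtimer (hard irq)
 * context, so io_link_timeout_fn() only unhooks the target request under
 * ->timeout_lock and punts the actual cancellation to task context via
 * io_req_task_link_timeout().
 */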
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
	struct io_kiocb *prev = req->timeout.prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING))
			ret = io_try_cancel_userdata(req, prev->user_data);
		io_req_complete_post(req, ret ?: -ETIME, 0);
		io_put_req(prev);
	} else {
		io_req_complete_post(req, -ETIME, 0);
	}
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = req->timeout.head;
	req->timeout.head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&req->timeout.list);
	req->timeout.prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer.
	 */
	if (req->timeout.head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&req->timeout.list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

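/*
 * Issue fast path: try the request with IO_URING_F_NONBLOCK first. On
 * success the completion may be batched in ->submit_state and flushed once
 * the array fills up; on -EAGAIN we try to arm a poll handler and, failing
 * that, punt the request to the io-wq worker pool.
 */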
static void __io_queue_sqe(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_kiocb *linked_timeout;
	int ret;

issue_sqe:
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (likely(!ret)) {
		if (req->flags & REQ_F_COMPLETE_INLINE) {
			struct io_ring_ctx *ctx = req->ctx;
			struct io_submit_state *state = &ctx->submit_state;

			state->compl_reqs[state->compl_nr++] = req;
			if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
				io_submit_flush_completions(ctx);
			return;
		}

		linked_timeout = io_prep_linked_timeout(req);
		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);
	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
		linked_timeout = io_prep_linked_timeout(req);

		switch (io_arm_poll_handler(req)) {
		case IO_APOLL_READY:
			if (linked_timeout)
				io_queue_linked_timeout(linked_timeout);
			goto issue_sqe;
		case IO_APOLL_ABORTED:
			/*
			 * Queued up for async execution, worker will release
			 * submit reference when the iocb is actually submitted.
			 */
			io_queue_async_work(req, NULL);
			break;
		}

		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);
	} else {
		io_req_complete_failed(req, ret);
	}
}

static inline void io_queue_sqe(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	if (unlikely(req->ctx->drain_active) && io_drain_req(req))
		return;

	if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
		__io_queue_sqe(req);
	} else if (req->flags & REQ_F_FAIL) {
		io_req_complete_fail_submit(req);
	} else {
		int ret = io_req_prep_async(req);

		if (unlikely(ret))
			io_req_complete_failed(req, ret);
		else
			io_queue_async_work(req, NULL);
	}
}

/*
 * Check SQE restrictions (opcode and flags).
 *
 * Returns 'true' if SQE is allowed, 'false' otherwise.
 */
static inline bool io_check_restriction(struct io_ring_ctx *ctx,
					struct io_kiocb *req,
					unsigned int sqe_flags)
{
	if (likely(!ctx->restricted))
		return true;

	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
		return false;

	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
	    ctx->restrictions.sqe_flags_required)
		return false;

	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
			  ctx->restrictions.sqe_flags_required))
		return false;

	return true;
}

static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state;
	unsigned int sqe_flags;
	int personality, ret = 0;

	/* req is partially pre-initialised, see io_preinit_req() */
	req->opcode = READ_ONCE(sqe->opcode);
	/* same numerical values with corresponding REQ_F_*, safe to copy */
	req->flags = sqe_flags = READ_ONCE(sqe->flags);
	req->user_data = READ_ONCE(sqe->user_data);
	req->file = NULL;
	req->fixed_rsrc_refs = NULL;
	req->task = current;

	/* enforce forwards compatibility on users */
	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
		return -EINVAL;
	if (unlikely(req->opcode >= IORING_OP_LAST))
		return -EINVAL;
	if (!io_check_restriction(ctx, req, sqe_flags))
		return -EACCES;

	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
	    !io_op_defs[req->opcode].buffer_select)
		return -EOPNOTSUPP;
	if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
		ctx->drain_active = true;

	personality = READ_ONCE(sqe->personality);
	if (personality) {
		req->creds = xa_load(&ctx->personalities, personality);
		if (!req->creds)
			return -EINVAL;
		get_cred(req->creds);
		req->flags |= REQ_F_CREDS;
	}
	state = &ctx->submit_state;

	/*
	 * Plug now if we have more than 1 IO left after this, and the target
	 * is potentially a read/write to block based storage.
	 */
	if (!state->plug_started && state->ios_left > 1 &&
	    io_op_defs[req->opcode].plug) {
		blk_start_plug(&state->plug);
		state->plug_started = true;
	}

	if (io_op_defs[req->opcode].needs_file) {
		req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
					(sqe_flags & IOSQE_FIXED_FILE),
					IO_URING_F_NONBLOCK);
		if (unlikely(!req->file))
			ret = -EBADF;
	}

	state->ios_left--;
	return ret;
}

static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			 const struct io_uring_sqe *sqe)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_link *link = &ctx->submit_state.link;
	int ret;

	ret = io_init_req(ctx, req, sqe);
	if (unlikely(ret)) {
fail_req:
		/* fail even hard links since we don't submit */
		if (link->head) {
			/*
			 * Whether a link request failed or was cancelled can
			 * be judged by REQ_F_FAIL, but the head is an
			 * exception: it may have REQ_F_FAIL set because some
			 * other request in the chain failed. Use req->result
			 * to distinguish a head that failed itself from one
			 * failed on another request's behalf, so the correct
			 * return code can be set for it. Initialise result
			 * here to avoid affecting the normal path.
			 */
			if (!(link->head->flags & REQ_F_FAIL))
				req_fail_link_node(link->head, -ECANCELED);
		} else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			/*
			 * the current req is a normal req, we should return
			 * the error and thus break the submission loop.
			 */
			io_req_complete_failed(req, ret);
			return ret;
		}
		req_fail_link_node(req, ret);
	} else {
		ret = io_req_prep(req, sqe);
		if (unlikely(ret))
			goto fail_req;
	}

	/* don't need @sqe from now on */
	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
				  req->flags, true,
				  ctx->flags & IORING_SETUP_SQPOLL);

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (link->head) {
		struct io_kiocb *head = link->head;

		if (!(req->flags & REQ_F_FAIL)) {
			ret = io_req_prep_async(req);
			if (unlikely(ret)) {
				req_fail_link_node(req, ret);
				if (!(head->flags & REQ_F_FAIL))
					req_fail_link_node(head, -ECANCELED);
			}
		}
		trace_io_uring_link(ctx, req, head);
		link->last->link = req;
		link->last = req;

		/* last request of a link, enqueue the link */
		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			link->head = NULL;
			io_queue_sqe(head);
		}
	} else {
		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			link->head = req;
			link->last = req;
		} else {
			io_queue_sqe(req);
		}
	}

	return 0;
}

/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state,
				struct io_ring_ctx *ctx)
{
	if (state->link.head)
		io_queue_sqe(state->link.head);
	if (state->compl_nr)
		io_submit_flush_completions(ctx);
	if (state->plug_started)
		blk_finish_plug(&state->plug);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  unsigned int max_ios)
{
	state->plug_started = false;
	state->ios_left = max_ios;
	/* set only head, no need to init link_last in advance */
	state->link.head = NULL;
}

static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}

/*
 * Fetch an sqe, if one is available. Note this returns a pointer to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
{
	unsigned head, mask = ctx->sq_entries - 1;
	unsigned sq_idx = ctx->cached_sq_head++ & mask;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = READ_ONCE(ctx->sq_array[sq_idx]);
	if (likely(head < ctx->sq_entries))
		return &ctx->sq_sqes[head];

	/* drop invalid entries */
	ctx->cq_extra--;
	WRITE_ONCE(ctx->rings->sq_dropped,
		   READ_ONCE(ctx->rings->sq_dropped) + 1);
	return NULL;
}

static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
	__must_hold(&ctx->uring_lock)
{
	int submitted = 0;

	/* make sure SQ entry isn't read before tail */
	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;
	io_get_task_refs(nr);

	io_submit_state_start(&ctx->submit_state, nr);
	while (submitted < nr) {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;

		req = io_alloc_req(ctx);
		if (unlikely(!req)) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}
		sqe = io_get_sqe(ctx);
		if (unlikely(!sqe)) {
			list_add(&req->inflight_entry, &ctx->submit_state.free_list);
			break;
		}
		/* will complete beyond this point, count as submitted */
		submitted++;
		if (io_submit_sqe(ctx, req, sqe))
			break;
	}

	if (unlikely(submitted != nr)) {
		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
		int unused = nr - ref_used;

		current->io_uring->cached_refs += unused;
		percpu_ref_put_many(&ctx->refs, unused);
	}

	io_submit_state_end(&ctx->submit_state, ctx);
	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);

	return submitted;
}

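/*
 * SQPOLL support: a dedicated kernel thread submits on behalf of the
 * application. One io_sq_data may serve several rings (typically shared
 * via IORING_SETUP_ATTACH_WQ), which is why per-ring submission gets
 * capped for fairness when more than one ring is attached.
 */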
static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
{
	/* Tell userspace we may need a wakeup call */
	spin_lock(&ctx->completion_lock);
	WRITE_ONCE(ctx->rings->sq_flags,
		   ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
	spin_unlock(&ctx->completion_lock);
}

static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
{
	spin_lock(&ctx->completion_lock);
	WRITE_ONCE(ctx->rings->sq_flags,
		   ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
	spin_unlock(&ctx->completion_lock);
}

static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!list_empty(&ctx->iopoll_list) || to_submit) {
		unsigned nr_events = 0;
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, &nr_events, 0);

		/*
		 * Don't submit if refs are dying: that matters for
		 * io_uring_register(), and io_ring_exit_work() relies
		 * on it as well.
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

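/*
 * Main loop of the SQPOLL thread. The thread spins as long as submissions
 * or iopoll completions keep arriving; once sq_thread_idle jiffies pass
 * without work it sets IORING_SQ_NEED_WAKEUP and schedules out until
 * userspace wakes it with io_uring_enter(IORING_ENTER_SQ_WAKEUP).
 */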
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	if (sqd->sq_cpu != -1)
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	else
		set_cpus_allowed_ptr(current, cpu_online_mask);
	current->flags |= PF_NO_SETAFFINITY;

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			cond_resched();
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !current->task_works) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				io_ring_set_wakeup_flag(ctx);

				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !list_empty_careful(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}
				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				io_ring_clear_wakeup_flag(ctx);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		io_ring_set_wakeup_flag(ctx);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}

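/*
 * State for a task sleeping in io_cqring_wait(). ->cq_tail is the CQ tail
 * value at which enough completions will have been posted; io_should_wake()
 * compares it against the current cached tail using wrap-safe signed
 * arithmetic.
 */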
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned nr_timeouts;
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			    int wake_flags, void *key)
{
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
							wq);

	/*
	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
	 * the task, and the next invocation will do it.
	 */
	if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
		return autoremove_wake_function(curr, mode, wake_flags, key);
	return -1;
}

static int io_run_task_work_sig(void)
{
	if (io_run_task_work())
		return 1;
	if (!signal_pending(current))
		return 0;
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		return -ERESTARTSYS;
	return -EINTR;
}

/* when this returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  ktime_t *timeout)
{
	int ret;

	/* make sure we run task_work before checking for signals */
	ret = io_run_task_work_sig();
	if (ret || io_should_wake(iowq))
		return ret;
	/* let the caller flush overflows, retry */
	if (test_bit(0, &ctx->check_cq_overflow))
		return 1;

	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
		return -ETIME;
	return 1;
}

/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
			  const sigset_t __user *sig, size_t sigsz,
			  struct __kernel_timespec __user *uts)
{
	struct io_wait_queue iowq;
	struct io_rings *rings = ctx->rings;
	ktime_t timeout = KTIME_MAX;
	int ret;

	do {
		io_cqring_overflow_flush(ctx);
		if (io_cqring_events(ctx) >= min_events)
			return 0;
		if (!io_run_task_work())
			break;
	} while (1);

	if (uts) {
		struct timespec64 ts;

		if (get_timespec64(&ts, uts))
			return -EFAULT;
		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	if (sig) {
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
						      sigsz);
		else
#endif
			ret = set_user_sigmask(sig, sigsz);

		if (ret)
			return ret;
	}

	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
	iowq.wq.private = current;
	INIT_LIST_HEAD(&iowq.wq.entry);
	iowq.ctx = ctx;
	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;

	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		/* if we can't even flush overflow, don't wait for more */
		if (!io_cqring_overflow_flush(ctx)) {
			ret = -EBUSY;
			break;
		}
		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
		finish_wait(&ctx->cq_wait, &iowq.wq);
		cond_resched();
	} while (ret > 0);

	restore_saved_sigmask_unless(ret == -EINTR);

	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}

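/*
 * Table helpers for rsrc data: a large logical array is carved into
 * page-sized chunks, so big file/buffer registrations don't require
 * high-order allocations.
 */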
static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

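/*
 * Rsrc (registered file/buffer) reference nodes. Each node tracks requests
 * still using the current generation of a resource table; once its percpu
 * ref drops to zero the node is queued, in order, for ->rsrc_put_work to
 * release the underlying resources.
 */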
static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}

static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
	struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
	unsigned long flags;
	bool first_add = false;
	unsigned long delay = HZ;

	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
	node->done = true;

	/* if we are mid-quiesce then do not delay */
	if (node->rsrc_data->quiesce)
		delay = 0;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;
		list_del(&node->node);
		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
	}
	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);

	if (first_add)
		mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}

static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return NULL;
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}

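/*
 * Switch to a fresh rsrc node, killing the old one. The caller must have
 * parked a backup node via io_rsrc_node_switch_start() beforehand, which
 * is why no allocation (and hence no failure) happens here.
 */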
static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
				struct io_rsrc_data *data_to_kill)
{
	WARN_ON_ONCE(!ctx->rsrc_backup_node);
	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);

	if (data_to_kill) {
		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;

		rsrc_node->rsrc_data = data_to_kill;
		spin_lock_irq(&ctx->rsrc_ref_lock);
		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
		spin_unlock_irq(&ctx->rsrc_ref_lock);

		atomic_inc(&data_to_kill->refs);
		percpu_ref_kill(&rsrc_node->refs);
		ctx->rsrc_node = NULL;
	}

	if (!ctx->rsrc_node) {
		ctx->rsrc_node = ctx->rsrc_backup_node;
		ctx->rsrc_backup_node = NULL;
	}
}

static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}

Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007983static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007984{
7985 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007986
Pavel Begunkov215c3902021-04-01 15:43:48 +01007987	/* As we may drop ->uring_lock, another task may have started a quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007988 if (data->quiesce)
7989 return -ENXIO;
7990
7991 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007992 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007993 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007994 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007995 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007996 io_rsrc_node_switch(ctx, data);
7997
Pavel Begunkov3e942492021-04-11 01:46:34 +01007998 /* kill initial ref, already quiesced if zero */
7999 if (atomic_dec_and_test(&data->refs))
8000 break;
Jens Axboec018db42021-08-09 08:15:50 -06008001 mutex_unlock(&ctx->uring_lock);
Hao Xu8bad28d2021-02-19 17:19:36 +08008002 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00008003 ret = wait_for_completion_interruptible(&data->done);
Jens Axboec018db42021-08-09 08:15:50 -06008004 if (!ret) {
8005 mutex_lock(&ctx->uring_lock);
Dylan Yudaken0d773aa2022-02-22 08:17:51 -08008006 if (atomic_read(&data->refs) > 0) {
8007 /*
8008 * it has been revived by another thread while
8009 * we were unlocked
8010 */
8011 mutex_unlock(&ctx->uring_lock);
8012 } else {
8013 break;
8014 }
Jens Axboec018db42021-08-09 08:15:50 -06008015 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008016
Pavel Begunkov3e942492021-04-11 01:46:34 +01008017 atomic_inc(&data->refs);
8018	/* wait for all work items potentially completing data->done */
8019 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07008020 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008021
Hao Xu8bad28d2021-02-19 17:19:36 +08008022 ret = io_run_task_work_sig();
8023 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00008024 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08008025 data->quiesce = false;
8026
Hao Xu8bad28d2021-02-19 17:19:36 +08008027 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008028}
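/*
 * Shape of the quiesce protocol above, as an illustrative outline:
 *
 *   1. switch in a fresh rsrc node so new requests stop pinning the
 *      old data;
 *   2. drop the initial data->refs; if that was the last reference,
 *      the data is already quiesced;
 *   3. otherwise drop ->uring_lock, flush rsrc_put_work and sleep on
 *      data->done until in-flight users release their references;
 *   4. on a signal, or if another thread revived the data while the
 *      lock was dropped, re-take the reference and retry the loop.
 *
 * Callers zero ctx->nr_user_files/nr_user_bufs around this call so no
 * new requests can index the table while the lock may be dropped.
 */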
8029
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008030static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
8031{
8032 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
8033 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
8034
8035 return &data->tags[table_idx][off];
8036}
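/*
 * Worked example (assuming 4 KiB pages, so each tag table holds
 * PAGE_SIZE / sizeof(u64) = 512 tags and IO_RSRC_TAG_TABLE_SHIFT is 9):
 * for idx = 1000, table_idx = 1000 >> 9 = 1 and off = 1000 & 511 = 488,
 * i.e. the tag lives in the second page-sized table at slot 488. This
 * mirrors the io_alloc_page_table() layout above.
 */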
8037
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008038static void io_rsrc_data_free(struct io_rsrc_data *data)
8039{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008040 size_t size = data->nr * sizeof(data->tags[0][0]);
8041
8042 if (data->tags)
8043 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008044 kfree(data);
8045}
8046
Pavel Begunkovd878c812021-06-14 02:36:18 +01008047static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
8048 u64 __user *utags, unsigned nr,
8049 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00008050{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008051 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008052 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008053 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00008054
8055 data = kzalloc(sizeof(*data), GFP_KERNEL);
8056 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01008057 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008058 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008059 if (!data->tags) {
8060 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01008061 return -ENOMEM;
8062 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008063
8064 data->nr = nr;
8065 data->ctx = ctx;
8066 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008067 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008068 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008069 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01008070 u64 *tag_slot = io_get_tag_slot(data, i);
8071
8072 if (copy_from_user(tag_slot, &utags[i],
8073 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008074 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008075 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008076 }
8077
Pavel Begunkov3e942492021-04-11 01:46:34 +01008078 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00008079 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01008080 *pdata = data;
8081 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008082fail:
8083 io_rsrc_data_free(data);
8084 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00008085}
8086
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01008087static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
8088{
Pavel Begunkov0bea96f2021-08-20 10:36:36 +01008089 table->files = kvcalloc(nr_files, sizeof(table->files[0]),
8090 GFP_KERNEL_ACCOUNT);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01008091 return !!table->files;
8092}
8093
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008094static void io_free_file_tables(struct io_file_table *table)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01008095{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008096 kvfree(table->files);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01008097 table->files = NULL;
8098}
8099
Jens Axboe2b188cc2019-01-07 10:46:33 -07008100static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
8101{
8102#if defined(CONFIG_UNIX)
8103 if (ctx->ring_sock) {
8104 struct sock *sock = ctx->ring_sock->sk;
8105 struct sk_buff *skb;
8106
8107 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
8108 kfree_skb(skb);
8109 }
8110#else
8111 int i;
8112
8113 for (i = 0; i < ctx->nr_user_files; i++) {
8114 struct file *file;
8115
8116 file = io_file_from_index(ctx, i);
8117 if (file)
8118 fput(file);
8119 }
8120#endif
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008121 io_free_file_tables(&ctx->file_table);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008122 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01008123 ctx->file_data = NULL;
8124 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00008125}
8126
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008127static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
8128{
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01008129 unsigned nr = ctx->nr_user_files;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008130 int ret;
8131
Pavel Begunkov08480402021-04-13 02:58:38 +01008132 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00008133 return -ENXIO;
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01008134
8135 /*
8136	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
8137	 * new requests from using the table.
8138 */
8139 ctx->nr_user_files = 0;
Pavel Begunkov08480402021-04-13 02:58:38 +01008140 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
Pavel Begunkovb1e7cad2022-06-13 06:32:44 +01008141 ctx->nr_user_files = nr;
Pavel Begunkov08480402021-04-13 02:58:38 +01008142 if (!ret)
8143 __io_sqe_files_unregister(ctx);
8144 return ret;
Jens Axboe6b063142019-01-10 22:13:58 -07008145}
8146
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008147static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008148 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008149{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008150 WARN_ON_ONCE(sqd->thread == current);
8151
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008152 /*
8153	 * Do the dance, but don't use a conditional clear_bit(): it would
8154	 * race with other threads incrementing park_pending and setting the bit.
8155 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008156 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008157 if (atomic_dec_return(&sqd->park_pending))
8158 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008159 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008160}
8161
Jens Axboe86e0d672021-03-05 08:44:39 -07008162static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008163 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008164{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008165 WARN_ON_ONCE(sqd->thread == current);
8166
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008167 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008168 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008169 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07008170 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07008171 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008172}
8173
8174static void io_sq_thread_stop(struct io_sq_data *sqd)
8175{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008176 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01008177 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008178
Jens Axboe05962f92021-03-06 13:58:48 -07008179 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01008180 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07008181 if (sqd->thread)
8182 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008183 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07008184 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008185}
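/*
 * Illustrative scenario for the park/unpark dance above: T1 holds the
 * park (park_pending == 1) while T2 enters io_sq_thread_park(), bumps
 * park_pending to 2, sets SHOULD_PARK and blocks on sqd->lock. When T1
 * unparks, clear_bit() runs first, but atomic_dec_return() yields 1, so
 * SHOULD_PARK is re-set and the SQPOLL thread stays parked for T2. Only
 * the final unpark, with park_pending reaching 0, leaves the bit clear.
 */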
8186
Jens Axboe534ca6d2020-09-02 13:52:19 -06008187static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07008188{
Jens Axboe534ca6d2020-09-02 13:52:19 -06008189 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008190 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
8191
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008192 io_sq_thread_stop(sqd);
8193 kfree(sqd);
8194 }
8195}
8196
8197static void io_sq_thread_finish(struct io_ring_ctx *ctx)
8198{
8199 struct io_sq_data *sqd = ctx->sq_data;
8200
8201 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07008202 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008203 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008204 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07008205 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008206
8207 io_put_sq_data(sqd);
8208 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008209 }
8210}
8211
Jens Axboeaa061652020-09-02 14:50:27 -06008212static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
8213{
8214 struct io_ring_ctx *ctx_attach;
8215 struct io_sq_data *sqd;
8216 struct fd f;
8217
8218 f = fdget(p->wq_fd);
8219 if (!f.file)
8220 return ERR_PTR(-ENXIO);
8221 if (f.file->f_op != &io_uring_fops) {
8222 fdput(f);
8223 return ERR_PTR(-EINVAL);
8224 }
8225
8226 ctx_attach = f.file->private_data;
8227 sqd = ctx_attach->sq_data;
8228 if (!sqd) {
8229 fdput(f);
8230 return ERR_PTR(-EINVAL);
8231 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07008232 if (sqd->task_tgid != current->tgid) {
8233 fdput(f);
8234 return ERR_PTR(-EPERM);
8235 }
Jens Axboeaa061652020-09-02 14:50:27 -06008236
8237 refcount_inc(&sqd->refs);
8238 fdput(f);
8239 return sqd;
8240}
8241
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008242static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
8243 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06008244{
8245 struct io_sq_data *sqd;
8246
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008247 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008248 if (p->flags & IORING_SETUP_ATTACH_WQ) {
8249 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008250 if (!IS_ERR(sqd)) {
8251 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008252 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008253 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07008254		/* fall through for the EPERM case, set up a new sqd/task */
8255 if (PTR_ERR(sqd) != -EPERM)
8256 return sqd;
8257 }
Jens Axboeaa061652020-09-02 14:50:27 -06008258
Jens Axboe534ca6d2020-09-02 13:52:19 -06008259 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
8260 if (!sqd)
8261 return ERR_PTR(-ENOMEM);
8262
Pavel Begunkov9e138a42021-03-14 20:57:12 +00008263 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008264 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06008265 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00008266 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008267 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008268 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008269 return sqd;
8270}
8271
Jens Axboe6b063142019-01-10 22:13:58 -07008272#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07008273/*
8274 * Ensure the UNIX gc is aware of our file set, so we are certain that
8275 * the io_uring can be safely unregistered on process exit, even if we have
8276 * loops in the file referencing.
8277 */
8278static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
8279{
8280 struct sock *sk = ctx->ring_sock->sk;
8281 struct scm_fp_list *fpl;
8282 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06008283 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07008284
Jens Axboe6b063142019-01-10 22:13:58 -07008285 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
8286 if (!fpl)
8287 return -ENOMEM;
8288
8289 skb = alloc_skb(0, GFP_KERNEL);
8290 if (!skb) {
8291 kfree(fpl);
8292 return -ENOMEM;
8293 }
8294
8295 skb->sk = sk;
Pavel Begunkov813d8fe2022-10-16 22:42:54 +01008296 skb->scm_io_uring = 1;
Jens Axboe6b063142019-01-10 22:13:58 -07008297
Jens Axboe08a45172019-10-03 08:11:03 -06008298 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07008299 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07008300 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06008301 struct file *file = io_file_from_index(ctx, i + offset);
8302
8303 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06008304 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06008305 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06008306 unix_inflight(fpl->user, fpl->fp[nr_files]);
8307 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07008308 }
8309
Jens Axboe08a45172019-10-03 08:11:03 -06008310 if (nr_files) {
8311 fpl->max = SCM_MAX_FD;
8312 fpl->count = nr_files;
8313 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008314 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06008315 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
8316 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07008317
Pavel Begunkov285f5d72022-04-06 12:43:58 +01008318 for (i = 0; i < nr; i++) {
8319 struct file *file = io_file_from_index(ctx, i + offset);
8320
8321 if (file)
8322 fput(file);
8323 }
Jens Axboe08a45172019-10-03 08:11:03 -06008324 } else {
8325 kfree_skb(skb);
Pavel Begunkov0853bd62022-03-25 16:36:31 +00008326 free_uid(fpl->user);
Jens Axboe08a45172019-10-03 08:11:03 -06008327 kfree(fpl);
8328 }
Jens Axboe6b063142019-01-10 22:13:58 -07008329
8330 return 0;
8331}
8332
8333/*
8334 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
8335 * causes regular reference counting to break down. We rely on the UNIX
8336 * garbage collection to take care of this problem for us.
8337 */
8338static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8339{
8340 unsigned left, total;
8341 int ret = 0;
8342
8343 total = 0;
8344 left = ctx->nr_user_files;
8345 while (left) {
8346 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07008347
8348 ret = __io_sqe_files_scm(ctx, this_files, total);
8349 if (ret)
8350 break;
8351 left -= this_files;
8352 total += this_files;
8353 }
8354
8355 if (!ret)
8356 return 0;
8357
8358 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06008359 struct file *file = io_file_from_index(ctx, total);
8360
8361 if (file)
8362 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07008363 total++;
8364 }
8365
8366 return ret;
8367}
8368#else
8369static int io_sqe_files_scm(struct io_ring_ctx *ctx)
8370{
8371 return 0;
8372}
8373#endif
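/*
 * Worked example for the batching above: SCM_MAX_FD is 253, so
 * registering 600 files produces three skbs carrying 253, 253 and 94
 * fds respectively, each charged to the ring socket's receive queue.
 * On a mid-way failure, the tail loop fput()s every file that never
 * made it into an skb, since only in-skb files are owned by the socket.
 */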
8374
Pavel Begunkov47e90392021-04-01 15:43:56 +01008375static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06008376{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00008377 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06008378#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06008379 struct sock *sock = ctx->ring_sock->sk;
8380 struct sk_buff_head list, *head = &sock->sk_receive_queue;
8381 struct sk_buff *skb;
8382 int i;
8383
8384 __skb_queue_head_init(&list);
8385
8386 /*
8387 * Find the skb that holds this file in its SCM_RIGHTS. When found,
8388 * remove this entry and rearrange the file array.
8389 */
8390 skb = skb_dequeue(head);
8391 while (skb) {
8392 struct scm_fp_list *fp;
8393
8394 fp = UNIXCB(skb).fp;
8395 for (i = 0; i < fp->count; i++) {
8396 int left;
8397
8398 if (fp->fp[i] != file)
8399 continue;
8400
8401 unix_notinflight(fp->user, fp->fp[i]);
8402 left = fp->count - 1 - i;
8403 if (left) {
8404 memmove(&fp->fp[i], &fp->fp[i + 1],
8405 left * sizeof(struct file *));
8406 }
8407 fp->count--;
8408 if (!fp->count) {
8409 kfree_skb(skb);
8410 skb = NULL;
8411 } else {
8412 __skb_queue_tail(&list, skb);
8413 }
8414 fput(file);
8415 file = NULL;
8416 break;
8417 }
8418
8419 if (!file)
8420 break;
8421
8422 __skb_queue_tail(&list, skb);
8423
8424 skb = skb_dequeue(head);
8425 }
8426
8427 if (skb_peek(&list)) {
8428 spin_lock_irq(&head->lock);
8429 while ((skb = __skb_dequeue(&list)) != NULL)
8430 __skb_queue_tail(head, skb);
8431 spin_unlock_irq(&head->lock);
8432 }
8433#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07008434 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008435#endif
8436}
8437
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008438static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07008439{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008440 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008441 struct io_ring_ctx *ctx = rsrc_data->ctx;
8442 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008443
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008444 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
8445 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008446
8447 if (prsrc->tag) {
8448 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008449
8450 io_ring_submit_lock(ctx, lock_ring);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008451 spin_lock(&ctx->completion_lock);
Pavel Begunkovb850d6d2022-08-29 14:30:13 +01008452 io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008453 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06008454 spin_unlock(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01008455 io_cqring_ev_posted(ctx);
8456 io_ring_submit_unlock(ctx, lock_ring);
8457 }
8458
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01008459 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008460 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008461 }
8462
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01008463 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01008464 if (atomic_dec_and_test(&rsrc_data->refs))
8465 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008466}
8467
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008468static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06008469{
8470 struct io_ring_ctx *ctx;
8471 struct llist_node *node;
8472
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008473 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
8474 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06008475
8476 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008477 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06008478 struct llist_node *next = node->next;
8479
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008480 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00008481 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06008482 node = next;
8483 }
8484}
8485
Jens Axboe05f3fb32019-12-09 11:22:50 -07008486static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01008487 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07008488{
8489 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008490 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008491 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01008492 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008493
8494 if (ctx->file_data)
8495 return -EBUSY;
8496 if (!nr_args)
8497 return -EINVAL;
8498 if (nr_args > IORING_MAX_FIXED_FILES)
8499 return -EMFILE;
Pavel Begunkov3a1b8a42021-08-20 10:36:35 +01008500 if (nr_args > rlimit(RLIMIT_NOFILE))
8501 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008502 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008503 if (ret)
8504 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008505 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
8506 &ctx->file_data);
8507 if (ret)
8508 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008509
Pavel Begunkovf3baed32021-04-01 15:43:42 +01008510 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008511 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008512 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008513
Jens Axboe05f3fb32019-12-09 11:22:50 -07008514 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01008515 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008516 ret = -EFAULT;
8517 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008518 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008519 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01008520 if (fd == -1) {
8521 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008522 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01008523 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008524 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01008525 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008526
Jens Axboe05f3fb32019-12-09 11:22:50 -07008527 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008528 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01008529 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008530 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008531
8532 /*
8533 * Don't allow io_uring instances to be registered. If UNIX
8534 * isn't enabled, then this causes a reference cycle and this
8535 * instance can never get freed. If UNIX is enabled we'll
8536 * handle it just fine, but there's still no point in allowing
8537 * a ring fd as it doesn't support regular read/write anyway.
8538 */
8539 if (file->f_op == &io_uring_fops) {
8540 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008541 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008542 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008543 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008544 }
8545
Jens Axboe05f3fb32019-12-09 11:22:50 -07008546 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008547 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01008548 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08008549 return ret;
8550 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008551
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008552 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07008553 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008554out_fput:
8555 for (i = 0; i < ctx->nr_user_files; i++) {
8556 file = io_file_from_index(ctx, i);
8557 if (file)
8558 fput(file);
8559 }
Pavel Begunkov042b0d82021-08-09 13:04:01 +01008560 io_free_file_tables(&ctx->file_table);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008561 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01008562out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01008563 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06008564 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07008565 return ret;
8566}
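/*
 * Userspace view of the registration path above -- a hedged sketch
 * using liburing (liburing API, not defined in this file), error
 * handling trimmed:
 *
 *	struct io_uring ring;
 *	int fds[4] = { open("a", O_RDONLY), open("b", O_RDONLY), -1, -1 };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	// Registers a 4-slot fixed file table; the -1 entries stay
 *	// sparse, matching the "allow sparse sets" branch above.
 *	io_uring_register_files(&ring, fds, 4);
 *
 * Requests then address the slots with IOSQE_FIXED_FILE set, where
 * sqe->fd holds the table index rather than a real file descriptor.
 */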
8567
Jens Axboec3a31e62019-10-03 13:59:56 -06008568static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
8569 int index)
8570{
8571#if defined(CONFIG_UNIX)
8572 struct sock *sock = ctx->ring_sock->sk;
8573 struct sk_buff_head *head = &sock->sk_receive_queue;
8574 struct sk_buff *skb;
8575
8576 /*
8577 * See if we can merge this file into an existing skb SCM_RIGHTS
8578 * file set. If there's no room, fall back to allocating a new skb
8579 * and filling it in.
8580 */
8581 spin_lock_irq(&head->lock);
8582 skb = skb_peek(head);
8583 if (skb) {
8584 struct scm_fp_list *fpl = UNIXCB(skb).fp;
8585
8586 if (fpl->count < SCM_MAX_FD) {
8587 __skb_unlink(skb, head);
8588 spin_unlock_irq(&head->lock);
8589 fpl->fp[fpl->count] = get_file(file);
8590 unix_inflight(fpl->user, fpl->fp[fpl->count]);
8591 fpl->count++;
8592 spin_lock_irq(&head->lock);
8593 __skb_queue_head(head, skb);
8594 } else {
8595 skb = NULL;
8596 }
8597 }
8598 spin_unlock_irq(&head->lock);
8599
8600 if (skb) {
8601 fput(file);
8602 return 0;
8603 }
8604
8605 return __io_sqe_files_scm(ctx, 1, index);
8606#else
8607 return 0;
8608#endif
8609}
8610
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008611static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
8612 struct io_rsrc_node *node, void *rsrc)
8613{
Pavel Begunkov5218d5c2022-04-07 14:05:04 +01008614 u64 *tag_slot = io_get_tag_slot(data, idx);
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008615 struct io_rsrc_put *prsrc;
8616
8617 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
8618 if (!prsrc)
8619 return -ENOMEM;
8620
Pavel Begunkov5218d5c2022-04-07 14:05:04 +01008621 prsrc->tag = *tag_slot;
8622 *tag_slot = 0;
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008623 prsrc->rsrc = rsrc;
8624 list_add(&prsrc->list, &node->rsrc_list);
8625 return 0;
8626}
8627
Pavel Begunkovb9445592021-08-25 12:25:45 +01008628static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
8629 unsigned int issue_flags, u32 slot_index)
8630{
8631 struct io_ring_ctx *ctx = req->ctx;
8632 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008633 bool needs_switch = false;
Pavel Begunkovb9445592021-08-25 12:25:45 +01008634 struct io_fixed_file *file_slot;
8635 int ret = -EBADF;
8636
8637 io_ring_submit_lock(ctx, !force_nonblock);
8638 if (file->f_op == &io_uring_fops)
8639 goto err;
8640 ret = -ENXIO;
8641 if (!ctx->file_data)
8642 goto err;
8643 ret = -EINVAL;
8644 if (slot_index >= ctx->nr_user_files)
8645 goto err;
8646
8647 slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
8648 file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008649
8650 if (file_slot->file_ptr) {
8651 struct file *old_file;
8652
8653 ret = io_rsrc_node_switch_start(ctx);
8654 if (ret)
8655 goto err;
8656
8657 old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8658 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
8659 ctx->rsrc_node, old_file);
8660 if (ret)
8661 goto err;
8662 file_slot->file_ptr = 0;
8663 needs_switch = true;
8664 }
Pavel Begunkovb9445592021-08-25 12:25:45 +01008665
8666 *io_get_tag_slot(ctx->file_data, slot_index) = 0;
8667 io_fixed_file_set(file_slot, file);
8668 ret = io_sqe_file_register(ctx, file, slot_index);
8669 if (ret) {
8670 file_slot->file_ptr = 0;
8671 goto err;
8672 }
8673
8674 ret = 0;
8675err:
Pavel Begunkov9c7b0ba2021-09-14 16:12:52 +01008676 if (needs_switch)
8677 io_rsrc_node_switch(ctx, ctx->file_data);
Pavel Begunkovb9445592021-08-25 12:25:45 +01008678 io_ring_submit_unlock(ctx, !force_nonblock);
8679 if (ret)
8680 fput(file);
8681 return ret;
8682}
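/*
 * io_install_fixed_file() backs direct-open style requests that place
 * the resulting file straight into a fixed slot. A hedged liburing
 * sketch (io_uring_prep_openat_direct() is liburing >= 2.1, not part
 * of this file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// Open "data.bin" and install it into fixed-file slot 2; if the
 *	// slot was occupied, the old file is queued for removal and the
 *	// rsrc node is switched, as in the function above.
 *	io_uring_prep_openat_direct(sqe, AT_FDCWD, "data.bin",
 *				    O_RDONLY, 0, 2);
 *	io_uring_submit(&ring);
 */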
8683
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008684static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
8685{
8686 unsigned int offset = req->close.file_slot - 1;
8687 struct io_ring_ctx *ctx = req->ctx;
8688 struct io_fixed_file *file_slot;
8689 struct file *file;
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008690 int ret;
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008691
8692 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8693 ret = -ENXIO;
8694 if (unlikely(!ctx->file_data))
8695 goto out;
8696 ret = -EINVAL;
8697 if (offset >= ctx->nr_user_files)
8698 goto out;
8699 ret = io_rsrc_node_switch_start(ctx);
8700 if (ret)
8701 goto out;
8702
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008703 offset = array_index_nospec(offset, ctx->nr_user_files);
8704 file_slot = io_fixed_file_slot(&ctx->file_table, offset);
Pavel Begunkov7df778b2021-09-24 20:04:29 +01008705 ret = -EBADF;
8706 if (!file_slot->file_ptr)
8707 goto out;
8708
8709 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
8710 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
8711 if (ret)
8712 goto out;
8713
8714 file_slot->file_ptr = 0;
8715 io_rsrc_node_switch(ctx, ctx->file_data);
8716 ret = 0;
8717out:
8718 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
8719 return ret;
8720}
8721
Jens Axboe05f3fb32019-12-09 11:22:50 -07008722static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008723 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07008724 unsigned nr_args)
8725{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008726 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008727 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008728 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008729 struct io_fixed_file *file_slot;
8730 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008731 int fd, i, err = 0;
8732 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008733 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06008734
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01008735 if (!ctx->file_data)
8736 return -ENXIO;
8737 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06008738 return -EINVAL;
8739
Pavel Begunkov67973b92021-01-26 13:51:09 +00008740 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008741 u64 tag = 0;
8742
8743 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
8744 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06008745 err = -EFAULT;
8746 break;
8747 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01008748 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
8749 err = -EINVAL;
8750 break;
8751 }
noah4e0377a2021-01-26 15:23:28 -05008752 if (fd == IORING_REGISTER_FILES_SKIP)
8753 continue;
8754
Pavel Begunkov67973b92021-01-26 13:51:09 +00008755 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01008756 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00008757
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008758 if (file_slot->file_ptr) {
8759 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01008760 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08008761 if (err)
8762 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008763 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08008764 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06008765 }
8766 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06008767 file = fget(fd);
8768 if (!file) {
8769 err = -EBADF;
8770 break;
8771 }
8772 /*
8773 * Don't allow io_uring instances to be registered. If
8774 * UNIX isn't enabled, then this causes a reference
8775 * cycle and this instance can never get freed. If UNIX
8776 * is enabled we'll handle it just fine, but there's
8777 * still no point in allowing a ring fd as it doesn't
8778 * support regular read/write anyway.
8779 */
8780 if (file->f_op == &io_uring_fops) {
8781 fput(file);
8782 err = -EBADF;
8783 break;
8784 }
Pavel Begunkov50c981b2022-04-06 12:43:57 +01008785 *io_get_tag_slot(data, i) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01008786 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008787 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008788 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01008789 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008790 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06008791 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00008792 }
Jens Axboec3a31e62019-10-03 13:59:56 -06008793 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07008794 }
8795
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008796 if (needs_switch)
8797 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06008798 return done ? done : err;
8799}
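/*
 * Userspace counterpart of the update loop above -- a hedged liburing
 * sketch (API not defined in this file):
 *
 *	int newfd = open("c", O_RDONLY);
 *	int fds[2] = { newfd, IORING_REGISTER_FILES_SKIP };
 *
 *	// Replaces slot 1 with newfd and leaves slot 2 untouched; a -1
 *	// entry would instead clear the slot. Returns the number of
 *	// slots processed.
 *	io_uring_register_files_update(&ring, 1, fds, 2);
 */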
Xiaoguang Wang05589552020-03-31 14:05:18 +08008800
Jens Axboe685fe7f2021-03-08 09:37:51 -07008801static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
8802 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03008803{
Jens Axboee9418942021-02-19 12:33:30 -07008804 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008805 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008806 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008807
Yang Yingliang362a9e62021-07-20 16:38:05 +08008808 mutex_lock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008809 hash = ctx->hash_map;
8810 if (!hash) {
8811 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
Yang Yingliang362a9e62021-07-20 16:38:05 +08008812 if (!hash) {
8813 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008814 return ERR_PTR(-ENOMEM);
Yang Yingliang362a9e62021-07-20 16:38:05 +08008815 }
Jens Axboee9418942021-02-19 12:33:30 -07008816 refcount_set(&hash->refs, 1);
8817 init_waitqueue_head(&hash->wait);
8818 ctx->hash_map = hash;
8819 }
Yang Yingliang362a9e62021-07-20 16:38:05 +08008820 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07008821
8822 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07008823 data.task = task;
Pavel Begunkovebc11b62021-08-09 13:04:05 +01008824 data.free_work = io_wq_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03008825 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03008826
Jens Axboed25e3a32021-02-16 11:41:41 -07008827	/* Use QD, or 4 * number of CPUs, whichever is smallest */
8828 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03008829
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008830 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03008831}
8832
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008833static int io_uring_alloc_task_context(struct task_struct *task,
8834 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008835{
8836 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06008837 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06008838
Pavel Begunkov09899b12021-06-14 02:36:22 +01008839 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06008840 if (unlikely(!tctx))
8841 return -ENOMEM;
8842
Jens Axboed8a6df12020-10-15 16:24:45 -06008843 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8844 if (unlikely(ret)) {
8845 kfree(tctx);
8846 return ret;
8847 }
8848
Jens Axboe685fe7f2021-03-08 09:37:51 -07008849 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008850 if (IS_ERR(tctx->io_wq)) {
8851 ret = PTR_ERR(tctx->io_wq);
8852 percpu_counter_destroy(&tctx->inflight);
8853 kfree(tctx);
8854 return ret;
8855 }
8856
Jens Axboe0f212202020-09-13 13:09:39 -06008857 xa_init(&tctx->xa);
8858 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008859 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01008860 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06008861 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00008862 spin_lock_init(&tctx->task_lock);
8863 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00008864 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06008865 return 0;
8866}
8867
8868void __io_uring_free(struct task_struct *tsk)
8869{
8870 struct io_uring_task *tctx = tsk->io_uring;
8871
8872 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008873 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01008874 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008875
Jens Axboed8a6df12020-10-15 16:24:45 -06008876 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008877 kfree(tctx);
8878 tsk->io_uring = NULL;
8879}
8880
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008881static int io_sq_offload_create(struct io_ring_ctx *ctx,
8882 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008883{
8884 int ret;
8885
Jens Axboed25e3a32021-02-16 11:41:41 -07008886 /* Retain compatibility with failing for an invalid attach attempt */
8887 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8888 IORING_SETUP_ATTACH_WQ) {
8889 struct fd f;
8890
8891 f = fdget(p->wq_fd);
8892 if (!f.file)
8893 return -ENXIO;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008894 if (f.file->f_op != &io_uring_fops) {
8895 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008896 return -EINVAL;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008897 }
8898 fdput(f);
Jens Axboed25e3a32021-02-16 11:41:41 -07008899 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07008900 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07008901 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008902 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008903 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008904
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008905 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008906 if (IS_ERR(sqd)) {
8907 ret = PTR_ERR(sqd);
8908 goto err;
8909 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008910
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01008911 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06008912 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06008913 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8914 if (!ctx->sq_thread_idle)
8915 ctx->sq_thread_idle = HZ;
8916
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008917 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008918 list_add(&ctx->sqd_list, &sqd->ctx_list);
8919 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008920 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008921 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008922 io_sq_thread_unpark(sqd);
8923
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008924 if (ret < 0)
8925 goto err;
8926 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008927 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008928
Jens Axboe6c271ce2019-01-10 11:22:30 -07008929 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008930 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008931
Jens Axboe917257d2019-04-13 09:28:55 -06008932 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008933 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008934 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008935 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008936 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008937 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008938 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008939
8940 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008941 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008942 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8943 if (IS_ERR(tsk)) {
8944 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008945 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008946 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008947
Jens Axboe46fe18b2021-03-04 12:39:36 -07008948 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008949 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008950 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008951 if (ret)
8952 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008953 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8954 /* Can't have SQ_AFF without SQPOLL */
8955 ret = -EINVAL;
8956 goto err;
8957 }
8958
Jens Axboe2b188cc2019-01-07 10:46:33 -07008959 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008960err_sqpoll:
8961 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008962err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008963 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008964 return ret;
8965}
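/*
 * Userspace setup that reaches the SQPOLL branch above -- a hedged
 * sketch using liburing's io_uring_queue_init_params() (values
 * illustrative):
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
 *	p.sq_thread_cpu = 3;		// pin the SQPOLL thread to CPU 3
 *	p.sq_thread_idle = 2000;	// park after 2000 ms without work
 *	io_uring_queue_init_params(64, &ring, &p);
 *
 * IORING_SETUP_ATTACH_WQ with p.wq_fd set would instead share an
 * existing ring's sq_data, taking the io_attach_sq_data() path.
 */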
8966
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008967static inline void __io_unaccount_mem(struct user_struct *user,
8968 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008969{
8970 atomic_long_sub(nr_pages, &user->locked_vm);
8971}
8972
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008973static inline int __io_account_mem(struct user_struct *user,
8974 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008975{
8976 unsigned long page_limit, cur_pages, new_pages;
8977
8978 /* Don't allow more pages than we can safely lock */
8979 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8980
8981 do {
8982 cur_pages = atomic_long_read(&user->locked_vm);
8983 new_pages = cur_pages + nr_pages;
8984 if (new_pages > page_limit)
8985 return -ENOMEM;
8986 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8987 new_pages) != cur_pages);
8988
8989 return 0;
8990}
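/*
 * Worked example (assuming 4 KiB pages): an RLIMIT_MEMLOCK of 64 KiB
 * gives page_limit = 65536 >> 12 = 16 pages. The cmpxchg loop makes the
 * check-and-add atomic against concurrent registrations: if another
 * task updated locked_vm between the read and the update, the cmpxchg
 * fails and the limit is re-checked with fresh values.
 */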
8991
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008992static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008993{
Jens Axboe62e398b2021-02-21 16:19:37 -07008994 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008995 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008996
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008997 if (ctx->mm_account)
8998 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008999}
9000
Jens Axboe26bfa89e2021-02-09 20:14:12 -07009001static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009002{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07009003 int ret;
9004
Jens Axboe62e398b2021-02-21 16:19:37 -07009005 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07009006 ret = __io_account_mem(ctx->user, nr_pages);
9007 if (ret)
9008 return ret;
9009 }
9010
Jens Axboe26bfa89e2021-02-09 20:14:12 -07009011 if (ctx->mm_account)
9012 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07009013
9014 return 0;
9015}
9016
Jens Axboe2b188cc2019-01-07 10:46:33 -07009017static void io_mem_free(void *ptr)
9018{
Mark Rutland52e04ef2019-04-30 17:30:21 +01009019 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009020
Mark Rutland52e04ef2019-04-30 17:30:21 +01009021 if (!ptr)
9022 return;
9023
9024 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009025 if (put_page_testzero(page))
9026 free_compound_page(page);
9027}
9028
9029static void *io_mem_alloc(size_t size)
9030{
Shakeel Butt246dfbc2022-01-24 21:17:36 -08009031 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009032
Shakeel Butt246dfbc2022-01-24 21:17:36 -08009033 return (void *) __get_free_pages(gfp, get_order(size));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009034}
9035
Hristo Venev75b28af2019-08-26 17:23:46 +00009036static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
9037 size_t *sq_offset)
9038{
9039 struct io_rings *rings;
9040 size_t off, sq_array_size;
9041
9042 off = struct_size(rings, cqes, cq_entries);
9043 if (off == SIZE_MAX)
9044 return SIZE_MAX;
9045
9046#ifdef CONFIG_SMP
9047 off = ALIGN(off, SMP_CACHE_BYTES);
9048 if (off == 0)
9049 return SIZE_MAX;
9050#endif
9051
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02009052 if (sq_offset)
9053 *sq_offset = off;
9054
Hristo Venev75b28af2019-08-26 17:23:46 +00009055 sq_array_size = array_size(sizeof(u32), sq_entries);
9056 if (sq_array_size == SIZE_MAX)
9057 return SIZE_MAX;
9058
9059 if (check_add_overflow(off, sq_array_size, &off))
9060 return SIZE_MAX;
9061
Hristo Venev75b28af2019-08-26 17:23:46 +00009062 return off;
9063}
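/*
 * Layout computed above, illustrated:
 *
 *	[struct io_rings][cq_entries * struct io_uring_cqe]
 *	    ...SMP_CACHE_BYTES alignment...
 *	[sq_entries * u32 sq array]
 *
 * *sq_offset is the aligned end of the CQE array, and the return value
 * is the total ring allocation size, with every step checked for
 * overflow (SIZE_MAX on failure).
 */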
9064
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009065static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01009066{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009067 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01009068 unsigned int i;
9069
Pavel Begunkov62248432021-04-28 13:11:29 +01009070 if (imu != ctx->dummy_ubuf) {
9071 for (i = 0; i < imu->nr_bvecs; i++)
9072 unpin_user_page(imu->bvec[i].bv_page);
9073 if (imu->acct_pages)
9074 io_unaccount_mem(ctx, imu->acct_pages);
9075 kvfree(imu);
9076 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009077 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01009078}
9079
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009080static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
9081{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009082 io_buffer_unmap(ctx, &prsrc->buf);
9083 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009084}
9085
9086static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07009087{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01009088 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07009089
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01009090 for (i = 0; i < ctx->nr_user_bufs; i++)
9091 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07009092 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08009093 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07009094 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009095 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07009096 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009097}
9098
Jens Axboeedafcce2019-01-09 09:16:05 -07009099static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
9100{
Pavel Begunkov91f5a602022-06-13 06:30:06 +01009101 unsigned nr = ctx->nr_user_bufs;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009102 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07009103
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009104 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07009105 return -ENXIO;
9106
Pavel Begunkov91f5a602022-06-13 06:30:06 +01009107 /*
9108	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
9109	 * new requests from using the table.
9110 */
9111 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009112 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
Pavel Begunkov91f5a602022-06-13 06:30:06 +01009113 ctx->nr_user_bufs = nr;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009114 if (!ret)
9115 __io_sqe_buffers_unregister(ctx);
9116 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07009117}
9118
9119static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
9120 void __user *arg, unsigned index)
9121{
9122 struct iovec __user *src;
9123
9124#ifdef CONFIG_COMPAT
9125 if (ctx->compat) {
9126 struct compat_iovec __user *ciovs;
9127 struct compat_iovec ciov;
9128
9129 ciovs = (struct compat_iovec __user *) arg;
9130 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
9131 return -EFAULT;
9132
Jens Axboed55e5f52019-12-11 16:12:15 -07009133 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07009134 dst->iov_len = ciov.iov_len;
9135 return 0;
9136 }
9137#endif
9138 src = (struct iovec __user *) arg;
9139 if (copy_from_user(dst, &src[index], sizeof(*dst)))
9140 return -EFAULT;
9141 return 0;
9142}
9143
Jens Axboede293932020-09-17 16:19:16 -06009144/*
9145 * Not super efficient, but this only happens at registration time. And we do cache
9146 * the last compound head, so generally we'll only do a full search if we don't
9147 * match that one.
9148 *
9149 * We check if the given compound head page has already been accounted, to
9150 * avoid double accounting it. This allows us to account the full size of the
9151 * page, not just the constituent pages of a huge page.
9152 */
9153static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
9154 int nr_pages, struct page *hpage)
9155{
9156 int i, j;
9157
9158 /* check current page array */
9159 for (i = 0; i < nr_pages; i++) {
9160 if (!PageCompound(pages[i]))
9161 continue;
9162 if (compound_head(pages[i]) == hpage)
9163 return true;
9164 }
9165
9166 /* check previously registered pages */
9167 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009168 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06009169
9170 for (j = 0; j < imu->nr_bvecs; j++) {
9171 if (!PageCompound(imu->bvec[j].bv_page))
9172 continue;
9173 if (compound_head(imu->bvec[j].bv_page) == hpage)
9174 return true;
9175 }
9176 }
9177
9178 return false;
9179}
9180
9181static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
9182 int nr_pages, struct io_mapped_ubuf *imu,
9183 struct page **last_hpage)
9184{
9185 int i, ret;
9186
Pavel Begunkov216e5832021-05-29 12:01:02 +01009187 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06009188 for (i = 0; i < nr_pages; i++) {
9189 if (!PageCompound(pages[i])) {
9190 imu->acct_pages++;
9191 } else {
9192 struct page *hpage;
9193
9194 hpage = compound_head(pages[i]);
9195 if (hpage == *last_hpage)
9196 continue;
9197 *last_hpage = hpage;
9198 if (headpage_already_acct(ctx, pages, i, hpage))
9199 continue;
9200 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
9201 }
9202 }
9203
9204 if (!imu->acct_pages)
9205 return 0;
9206
Jens Axboe26bfa89e2021-02-09 20:14:12 -07009207 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06009208 if (ret)
9209 imu->acct_pages = 0;
9210 return ret;
9211}
9212
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009213static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009214 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009215 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07009216{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009217 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07009218 struct vm_area_struct **vmas = NULL;
9219 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009220 unsigned long off, start, end, ubuf;
9221 size_t size;
9222 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07009223
Pavel Begunkov62248432021-04-28 13:11:29 +01009224 if (!iov->iov_base) {
9225 *pimu = ctx->dummy_ubuf;
9226 return 0;
9227 }
9228
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009229 ubuf = (unsigned long) iov->iov_base;
9230 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
9231 start = ubuf >> PAGE_SHIFT;
9232 nr_pages = end - start;
9233
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009234 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009235 ret = -ENOMEM;
9236
9237 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
9238 if (!pages)
9239 goto done;
9240
9241 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
9242 GFP_KERNEL);
9243 if (!vmas)
9244 goto done;
9245
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009246 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01009247 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009248 goto done;
9249
9250 ret = 0;
9251 mmap_read_lock(current->mm);
9252 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
9253 pages, vmas);
9254 if (pret == nr_pages) {
Pavel Begunkovdde0d0d2023-03-06 13:21:40 -07009255 struct file *file = vmas[0]->vm_file;
9256
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009257 /* don't support file backed memory */
9258 for (i = 0; i < nr_pages; i++) {
Pavel Begunkovdde0d0d2023-03-06 13:21:40 -07009259 if (vmas[i]->vm_file != file) {
9260 ret = -EINVAL;
9261 break;
9262 }
9263 if (!file)
Pavel Begunkov40dad762021-06-09 15:26:54 +01009264 continue;
Pavel Begunkovdde0d0d2023-03-06 13:21:40 -07009265 if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009266 ret = -EOPNOTSUPP;
9267 break;
9268 }
9269 }
9270 } else {
9271 ret = pret < 0 ? pret : -EFAULT;
9272 }
9273 mmap_read_unlock(current->mm);
9274 if (ret) {
9275 /*
9276		 * if we did a partial map, or found file-backed vmas,
9277		 * release any pages we did get
9278 */
9279 if (pret > 0)
9280 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009281 goto done;
9282 }
9283
9284 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9285 if (ret) {
9286 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009287 goto done;
9288 }
9289
9290 off = ubuf & ~PAGE_MASK;
9291 size = iov->iov_len;
9292 for (i = 0; i < nr_pages; i++) {
9293 size_t vec_len;
9294
9295 vec_len = min_t(size_t, size, PAGE_SIZE - off);
9296 imu->bvec[i].bv_page = pages[i];
9297 imu->bvec[i].bv_len = vec_len;
9298 imu->bvec[i].bv_offset = off;
9299 off = 0;
9300 size -= vec_len;
9301 }
9302 /* store original address for later verification */
9303 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01009304 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009305 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009306 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009307 ret = 0;
9308done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009309 if (ret)
9310 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009311 kvfree(pages);
9312 kvfree(vmas);
9313 return ret;
9314}
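
/*
 * Userspace counterpart (illustrative sketch, not part of this file):
 * buffers reach io_sqe_buffer_register() via io_uring_register(2), e.g.:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * after which IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED requests can
 * reference the pinned mapping by buffer index.
 */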
9315
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009316static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009317{
Pavel Begunkov87094462021-04-11 01:46:36 +01009318 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
9319 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009320}
9321
9322static int io_buffer_validate(struct iovec *iov)
9323{
Pavel Begunkov50e96982021-03-24 22:59:01 +00009324 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
9325
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009326 /*
9327	 * Don't impose further limits on the size and buffer
9328	 * constraints here; we'll return -EINVAL later when IO is
9329	 * submitted if they are wrong.
9330 */
Pavel Begunkov62248432021-04-28 13:11:29 +01009331 if (!iov->iov_base)
9332 return iov->iov_len ? -EFAULT : 0;
9333 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009334 return -EFAULT;
9335
9336 /* arbitrary limit, but we need something */
9337 if (iov->iov_len > SZ_1G)
9338 return -EFAULT;
9339
Pavel Begunkov50e96982021-03-24 22:59:01 +00009340 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
9341 return -EOVERFLOW;
9342
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009343 return 0;
9344}
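
/*
 * For example (illustrative): { .iov_base = NULL, .iov_len = 0 } passes
 * as a sparse slot, { .iov_base = NULL, .iov_len = 4096 } fails with
 * -EFAULT, and any iov_len above SZ_1G is rejected even if the range
 * itself would be mappable.
 */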
9345
9346static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009347 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009348{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009349 struct page *last_hpage = NULL;
9350 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009351 int i, ret;
9352 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009353
Pavel Begunkov87094462021-04-11 01:46:36 +01009354 if (ctx->user_bufs)
9355 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01009356 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01009357 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009358 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009359 if (ret)
9360 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01009361 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9362 if (ret)
9363 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009364 ret = io_buffers_map_alloc(ctx, nr_args);
9365 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08009366 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009367 return ret;
9368 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009369
Pavel Begunkov87094462021-04-11 01:46:36 +01009370 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07009371 ret = io_copy_iov(ctx, &iov, arg, i);
9372 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009373 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08009374 ret = io_buffer_validate(&iov);
9375 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009376 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01009377 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01009378 ret = -EINVAL;
9379 break;
9380 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009381
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009382 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9383 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009384 if (ret)
9385 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009386 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009387
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009388 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009389
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009390 ctx->buf_data = data;
9391 if (ret)
9392 __io_sqe_buffers_unregister(ctx);
9393 else
9394 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07009395 return ret;
9396}
9397
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009398static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
9399 struct io_uring_rsrc_update2 *up,
9400 unsigned int nr_args)
9401{
9402 u64 __user *tags = u64_to_user_ptr(up->tags);
9403 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009404 struct page *last_hpage = NULL;
9405 bool needs_switch = false;
9406 __u32 done;
9407 int i, err;
9408
9409 if (!ctx->buf_data)
9410 return -ENXIO;
9411 if (up->offset + nr_args > ctx->nr_user_bufs)
9412 return -EINVAL;
9413
9414 for (done = 0; done < nr_args; done++) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009415 struct io_mapped_ubuf *imu;
9416 int offset = up->offset + done;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009417 u64 tag = 0;
9418
9419 err = io_copy_iov(ctx, &iov, iovs, done);
9420 if (err)
9421 break;
9422 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
9423 err = -EFAULT;
9424 break;
9425 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009426 err = io_buffer_validate(&iov);
9427 if (err)
9428 break;
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01009429 if (!iov.iov_base && tag) {
9430 err = -EINVAL;
9431 break;
9432 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009433 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
9434 if (err)
9435 break;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009436
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009437 i = array_index_nospec(offset, ctx->nr_user_bufs);
Pavel Begunkov62248432021-04-28 13:11:29 +01009438 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
Pavel Begunkovb8ed0f72022-04-07 14:05:05 +01009439 err = io_queue_rsrc_removal(ctx->buf_data, i,
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009440 ctx->rsrc_node, ctx->user_bufs[i]);
9441 if (unlikely(err)) {
9442 io_buffer_unmap(ctx, &imu);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009443 break;
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009444 }
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009445 ctx->user_bufs[i] = NULL;
9446 needs_switch = true;
9447 }
9448
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01009449 ctx->user_bufs[i] = imu;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01009450 *io_get_tag_slot(ctx->buf_data, offset) = tag;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009451 }
9452
9453 if (needs_switch)
9454 io_rsrc_node_switch(ctx, ctx->buf_data);
9455 return done ? done : err;
9456}
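
/*
 * Userspace counterpart (illustrative sketch): updates arrive via
 * io_uring_register(2) with IORING_REGISTER_BUFFERS_UPDATE and a
 * struct io_uring_rsrc_update2 describing the new iovecs, optional
 * tags and the starting slot, e.g.:
 *
 *	struct io_uring_rsrc_update2 up = {
 *		.offset	= slot,
 *		.data	= (__u64)(unsigned long) &iov,
 *		.tags	= (__u64)(unsigned long) &tag,
 *		.nr	= 1,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS_UPDATE, &up, sizeof(up));
 */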
9457
Jens Axboe9b402842019-04-11 11:45:41 -06009458static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
9459{
9460 __s32 __user *fds = arg;
9461 int fd;
9462
9463 if (ctx->cq_ev_fd)
9464 return -EBUSY;
9465
9466 if (copy_from_user(&fd, fds, sizeof(*fds)))
9467 return -EFAULT;
9468
9469 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
9470 if (IS_ERR(ctx->cq_ev_fd)) {
9471 int ret = PTR_ERR(ctx->cq_ev_fd);
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01009472
Jens Axboe9b402842019-04-11 11:45:41 -06009473 ctx->cq_ev_fd = NULL;
9474 return ret;
9475 }
9476
9477 return 0;
9478}
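
/*
 * Userspace counterpart (illustrative sketch): the application creates
 * the eventfd itself and hands the descriptor over, e.g.:
 *
 *	int efd = eventfd(0, 0);
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_EVENTFD, &efd, 1);
 *
 * after which completions posted to the CQ ring also signal the eventfd.
 */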
9479
9480static int io_eventfd_unregister(struct io_ring_ctx *ctx)
9481{
9482 if (ctx->cq_ev_fd) {
9483 eventfd_ctx_put(ctx->cq_ev_fd);
9484 ctx->cq_ev_fd = NULL;
9485 return 0;
9486 }
9487
9488 return -ENXIO;
9489}
9490
Jens Axboe5a2e7452020-02-23 16:23:11 -07009491static void io_destroy_buffers(struct io_ring_ctx *ctx)
9492{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07009493 struct io_buffer *buf;
9494 unsigned long index;
9495
Ye Bin2d447d32021-11-22 10:47:37 +08009496 xa_for_each(&ctx->io_buffers, index, buf)
Jens Axboe9e15c3a2021-03-13 12:29:43 -07009497 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07009498}
9499
Pavel Begunkov72558342021-08-09 20:18:09 +01009500static void io_req_cache_free(struct list_head *list)
Jens Axboe1b4c3512021-02-10 00:03:19 +00009501{
Jens Axboe68e68ee2021-02-13 09:00:02 -07009502 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00009503
Pavel Begunkovbb943b82021-08-09 20:18:10 +01009504 list_for_each_entry_safe(req, nxt, list, inflight_entry) {
9505 list_del(&req->inflight_entry);
Jens Axboe1b4c3512021-02-10 00:03:19 +00009506 kmem_cache_free(req_cachep, req);
9507 }
9508}
9509
Jens Axboe4010fec2021-02-27 15:04:18 -07009510static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009511{
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01009512 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00009513
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07009514 mutex_lock(&ctx->uring_lock);
9515
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01009516 if (state->free_reqs) {
9517 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
9518 state->free_reqs = 0;
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00009519 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07009520
Pavel Begunkovcd0ca2e2021-08-09 20:18:11 +01009521 io_flush_cached_locked_reqs(ctx, state);
9522 io_req_cache_free(&state->free_list);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07009523 mutex_unlock(&ctx->uring_lock);
9524}
9525
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009526static void io_wait_rsrc_data(struct io_rsrc_data *data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009527{
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009528 if (data && !atomic_dec_and_test(&data->refs))
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009529 wait_for_completion(&data->done);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009530}
9531
Jens Axboe2b188cc2019-01-07 10:46:33 -07009532static void io_ring_ctx_free(struct io_ring_ctx *ctx)
9533{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07009534 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009535
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009536 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9537 io_wait_rsrc_data(ctx->buf_data);
9538 io_wait_rsrc_data(ctx->file_data);
9539
Hao Xu8bad28d2021-02-19 17:19:36 +08009540 mutex_lock(&ctx->uring_lock);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009541 if (ctx->buf_data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01009542 __io_sqe_buffers_unregister(ctx);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01009543 if (ctx->file_data)
Pavel Begunkov08480402021-04-13 02:58:38 +01009544 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01009545 if (ctx->rings)
9546 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08009547 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06009548 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07009549 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01009550 if (ctx->sq_creds)
9551 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07009552
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01009553 /* there are no registered resources left, nobody uses it */
9554 if (ctx->rsrc_node)
9555 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00009556 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01009557 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01009558 flush_delayed_work(&ctx->rsrc_put_work);
9559
9560 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
9561 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009562
9563#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07009564 if (ctx->ring_sock) {
9565 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07009566 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07009567 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009568#endif
Pavel Begunkovef9dd632021-08-28 19:54:38 -06009569 WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
Jens Axboe2b188cc2019-01-07 10:46:33 -07009570
Pavel Begunkovcd148d42022-10-16 22:42:55 +01009571 if (ctx->mm_account) {
9572 mmdrop(ctx->mm_account);
9573 ctx->mm_account = NULL;
9574 }
9575
Hristo Venev75b28af2019-08-26 17:23:46 +00009576 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009577 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009578
9579 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009580 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07009581 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07009582 if (ctx->hash_map)
9583 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07009584 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01009585 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009586 kfree(ctx);
9587}
9588
9589static __poll_t io_uring_poll(struct file *file, poll_table *wait)
9590{
9591 struct io_ring_ctx *ctx = file->private_data;
9592 __poll_t mask = 0;
9593
Pavel Begunkov311997b2021-06-14 23:37:28 +01009594 poll_wait(file, &ctx->poll_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02009595 /*
9596	 * synchronizes with the barrier from the wq_has_sleeper() call in
9597	 * io_commit_cqring()
9598 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07009599 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06009600 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009601 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08009602
9603 /*
9604	 * Don't flush the cqring overflow list here, just do a simple check.
9605	 * Otherwise there could possibly be an ABBA deadlock:
9606 * CPU0 CPU1
9607 * ---- ----
9608 * lock(&ctx->uring_lock);
9609 * lock(&ep->mtx);
9610 * lock(&ctx->uring_lock);
9611 * lock(&ep->mtx);
9612 *
9613	 * Users may get EPOLLIN while seeing nothing in the cqring; this
9614	 * pushes them to do the flush.
9615 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01009616 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009617 mask |= EPOLLIN | EPOLLRDNORM;
9618
9619 return mask;
9620}
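
/*
 * Userspace counterpart (illustrative sketch): the ring fd can be fed
 * to poll/epoll like any other fd, e.g.:
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *
 * where POLLIN reports CQEs ready to reap and POLLOUT reports free SQ
 * ring space, matching the mask built above.
 */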
9621
Yejune Deng0bead8c2020-12-24 11:02:20 +08009622static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07009623{
Jens Axboe4379bf82021-02-15 13:40:22 -07009624 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07009625
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009626 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07009627 if (creds) {
9628 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08009629 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009630 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08009631
9632 return -EINVAL;
9633}
9634
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009635struct io_tctx_exit {
9636 struct callback_head task_work;
9637 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009638 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009639};
9640
9641static void io_tctx_exit_cb(struct callback_head *cb)
9642{
9643 struct io_uring_task *tctx = current->io_uring;
9644 struct io_tctx_exit *work;
9645
9646 work = container_of(cb, struct io_tctx_exit, task_work);
9647 /*
9648 * When @in_idle, we're in cancellation and it's racy to remove the
9649 * node. It'll be removed by the end of cancellation, just ignore it.
Harshit Mogalapallif8955112022-12-06 01:38:32 -08009650 * tctx can be NULL if the queueing of this task_work raced with
9651	 * work cancellation off the exec path.
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009652 */
Harshit Mogalapallif8955112022-12-06 01:38:32 -08009653 if (tctx && !atomic_read(&tctx->in_idle))
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009654 io_uring_del_tctx_node((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009655 complete(&work->completion);
9656}
9657
Pavel Begunkov28090c12021-04-25 23:34:45 +01009658static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
9659{
9660 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9661
9662 return req->ctx == data;
9663}
9664
Jens Axboe85faa7b2020-04-09 18:14:00 -06009665static void io_ring_exit_work(struct work_struct *work)
9666{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009667 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00009668 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkov58d3be22021-08-09 13:04:17 +01009669 unsigned long interval = HZ / 20;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009670 struct io_tctx_exit exit;
9671 struct io_tctx_node *node;
9672 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06009673
Jens Axboe56952e92020-06-17 15:00:04 -06009674 /*
9675	 * If we're doing polled IO and end up with requests submitted
9676	 * async (out-of-line), then completions can come in while
9677 * we're waiting for refs to drop. We need to reap these manually,
9678 * as nobody else will be looking for them.
9679 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03009680 do {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009681 io_uring_try_cancel_requests(ctx, NULL, true);
Pavel Begunkov28090c12021-04-25 23:34:45 +01009682 if (ctx->sq_data) {
9683 struct io_sq_data *sqd = ctx->sq_data;
9684 struct task_struct *tsk;
9685
9686 io_sq_thread_park(sqd);
9687 tsk = sqd->thread;
9688 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
9689 io_wq_cancel_cb(tsk->io_uring->io_wq,
9690 io_cancel_ctx_cb, ctx, true);
9691 io_sq_thread_unpark(sqd);
9692 }
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00009693
Pavel Begunkov58d3be22021-08-09 13:04:17 +01009694 if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
9695 /* there is little hope left, don't run it too often */
9696 interval = HZ * 60;
9697 }
9698 } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009699
Pavel Begunkov7f006512021-04-14 13:38:34 +01009700 init_completion(&exit.completion);
9701 init_task_work(&exit.task_work, io_tctx_exit_cb);
9702 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01009703 /*
9704 * Some may use context even when all refs and requests have been put,
9705 * and they are free to do so while still holding uring_lock or
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01009706 * completion_lock, see io_req_task_submit(). Apart from other work,
Pavel Begunkov89b50662021-04-01 15:43:50 +01009707	 * this lock/unlock section also waits for them to finish.
9708 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009709 mutex_lock(&ctx->uring_lock);
9710 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00009711 WARN_ON_ONCE(time_after(jiffies, timeout));
9712
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009713 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
9714 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01009715 /* don't spin on a single task if cancellation failed */
9716 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009717 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
9718 if (WARN_ON_ONCE(ret))
9719 continue;
9720 wake_up_process(node->task);
9721
9722 mutex_unlock(&ctx->uring_lock);
9723 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009724 mutex_lock(&ctx->uring_lock);
9725 }
9726 mutex_unlock(&ctx->uring_lock);
Jens Axboe79ebeae2021-08-10 15:18:27 -06009727 spin_lock(&ctx->completion_lock);
9728 spin_unlock(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00009729
Jens Axboe85faa7b2020-04-09 18:14:00 -06009730 io_ring_ctx_free(ctx);
9731}
9732
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009733/* Returns true if we found and killed one or more timeouts */
9734static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009735 bool cancel_all)
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009736{
9737 struct io_kiocb *req, *tmp;
9738 int canceled = 0;
9739
Jens Axboe79ebeae2021-08-10 15:18:27 -06009740 spin_lock(&ctx->completion_lock);
9741 spin_lock_irq(&ctx->timeout_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009742 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009743 if (io_match_task(req, tsk, cancel_all)) {
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009744 io_kill_timeout(req, -ECANCELED);
9745 canceled++;
9746 }
9747 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06009748 spin_unlock_irq(&ctx->timeout_lock);
Pavel Begunkov51520422021-03-29 11:39:29 +01009749 if (canceled != 0)
9750 io_commit_cqring(ctx);
Jens Axboe79ebeae2021-08-10 15:18:27 -06009751 spin_unlock(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00009752 if (canceled != 0)
9753 io_cqring_ev_posted(ctx);
9754 return canceled != 0;
9755}
9756
Jens Axboe2b188cc2019-01-07 10:46:33 -07009757static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
9758{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009759 unsigned long index;
9760 struct creds *creds;
9761
Jens Axboe2b188cc2019-01-07 10:46:33 -07009762 mutex_lock(&ctx->uring_lock);
9763 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00009764 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009765 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009766 xa_for_each(&ctx->personalities, index, creds)
9767 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009768 mutex_unlock(&ctx->uring_lock);
9769
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009770 io_kill_timeouts(ctx, NULL, true);
9771 io_poll_remove_all(ctx, NULL, true);
Jens Axboe561fb042019-10-24 07:25:42 -06009772
Jens Axboe15dff282019-11-13 09:09:23 -07009773 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03009774 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06009775
Jens Axboe86e2d692023-01-21 12:36:08 -07009776 /* drop cached put refs after potentially doing completions */
9777 if (current->io_uring)
9778 io_uring_drop_tctx_refs(current);
9779
Jens Axboe85faa7b2020-04-09 18:14:00 -06009780 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06009781 /*
9782 * Use system_unbound_wq to avoid spawning tons of event kworkers
9783 * if we're exiting a ton of rings at the same time. It just adds
9784	 * noise and overhead; there's no discernible change in runtime
9785 * over using system_wq.
9786 */
9787 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009788}
9789
9790static int io_uring_release(struct inode *inode, struct file *file)
9791{
9792 struct io_ring_ctx *ctx = file->private_data;
9793
9794 file->private_data = NULL;
9795 io_ring_ctx_wait_and_kill(ctx);
9796 return 0;
9797}
9798
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009799struct io_task_cancel {
9800 struct task_struct *task;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009801 bool all;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009802};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03009803
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009804static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07009805{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00009806 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00009807 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00009808
Pavel Begunkov1c939a52021-11-26 14:38:15 +00009809 return io_match_task_safe(req, cancel->task, cancel->all);
Jens Axboeb711d4e2020-08-16 08:23:05 -07009810}
9811
Pavel Begunkove1915f72021-03-11 23:29:35 +00009812static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009813 struct task_struct *task, bool cancel_all)
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009814{
Pavel Begunkove1915f72021-03-11 23:29:35 +00009815 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009816 LIST_HEAD(list);
9817
Jens Axboe79ebeae2021-08-10 15:18:27 -06009818 spin_lock(&ctx->completion_lock);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009819 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov1c939a52021-11-26 14:38:15 +00009820 if (io_match_task_safe(de->req, task, cancel_all)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009821 list_cut_position(&list, &ctx->defer_list, &de->list);
9822 break;
9823 }
9824 }
Jens Axboe79ebeae2021-08-10 15:18:27 -06009825 spin_unlock(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00009826 if (list_empty(&list))
9827 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009828
9829 while (!list_empty(&list)) {
9830 de = list_first_entry(&list, struct io_defer_entry, list);
9831 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00009832 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009833 kfree(de);
9834 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00009835 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03009836}
9837
Pavel Begunkov1b007642021-03-06 11:02:17 +00009838static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
9839{
9840 struct io_tctx_node *node;
9841 enum io_wq_cancel cret;
9842 bool ret = false;
9843
9844 mutex_lock(&ctx->uring_lock);
9845 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
9846 struct io_uring_task *tctx = node->task->io_uring;
9847
9848 /*
9849 * io_wq will stay alive while we hold uring_lock, because it's
9850		 * killed after ctx nodes, which requires taking the lock.
9851 */
9852 if (!tctx || !tctx->io_wq)
9853 continue;
9854 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
9855 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9856 }
9857 mutex_unlock(&ctx->uring_lock);
9858
9859 return ret;
9860}
9861
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009862static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9863 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009864 bool cancel_all)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009865{
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009866 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00009867 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009868
9869 while (1) {
9870 enum io_wq_cancel cret;
9871 bool ret = false;
9872
Pavel Begunkov1b007642021-03-06 11:02:17 +00009873 if (!task) {
9874 ret |= io_uring_try_cancel_iowq(ctx);
9875 } else if (tctx && tctx->io_wq) {
9876 /*
9877 * Cancels requests of all rings, not only @ctx, but
9878 * it's fine as the task is in exit/exec.
9879 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009880 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009881 &cancel, true);
9882 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9883 }
9884
9885 /* SQPOLL thread does its own polling */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009886 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
Jens Axboed052d1d2021-03-11 10:49:20 -07009887 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009888 while (!list_empty_careful(&ctx->iopoll_list)) {
9889 io_iopoll_try_reap_events(ctx);
9890 ret = true;
Jens Axboeabd54d82023-03-06 13:18:27 -07009891 cond_resched();
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009892 }
9893 }
9894
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009895 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9896 ret |= io_poll_remove_all(ctx, task, cancel_all);
9897 ret |= io_kill_timeouts(ctx, task, cancel_all);
Pavel Begunkove5dc4802021-06-26 21:40:46 +01009898 if (task)
9899 ret |= io_run_task_work();
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009900 if (!ret)
9901 break;
9902 cond_resched();
9903 }
9904}
9905
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009906static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009907{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009908 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009909 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00009910 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009911
9912 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009913 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009914 if (unlikely(ret))
9915 return ret;
Pavel Begunkove139a1e2021-10-19 23:43:46 +01009916
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009917 tctx = current->io_uring;
Pavel Begunkove139a1e2021-10-19 23:43:46 +01009918 if (ctx->iowq_limits_set) {
9919 unsigned int limits[2] = { ctx->iowq_limits[0],
9920 ctx->iowq_limits[1], };
9921
9922 ret = io_wq_max_workers(tctx->io_wq, limits);
9923 if (ret)
9924 return ret;
9925 }
Jens Axboe0f212202020-09-13 13:09:39 -06009926 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009927 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9928 node = kmalloc(sizeof(*node), GFP_KERNEL);
9929 if (!node)
9930 return -ENOMEM;
9931 node->ctx = ctx;
9932 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06009933
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009934 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9935 node, GFP_KERNEL));
9936 if (ret) {
9937 kfree(node);
9938 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06009939 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009940
9941 mutex_lock(&ctx->uring_lock);
9942 list_add(&node->ctx_node, &ctx->tctx_list);
9943 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009944 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009945 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06009946 return 0;
9947}
9948
9949/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009950 * Note that this task has used io_uring. We use it for cancellation purposes.
9951 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009952static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009953{
9954 struct io_uring_task *tctx = current->io_uring;
9955
9956 if (likely(tctx && tctx->last == ctx))
9957 return 0;
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009958 return __io_uring_add_tctx_node(ctx);
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009959}
9960
9961/*
Jens Axboe0f212202020-09-13 13:09:39 -06009962 * Remove this io_uring ctx -> task mapping.
9963 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009964static void io_uring_del_tctx_node(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06009965{
9966 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009967 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00009968
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00009969 if (!tctx)
9970 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009971 node = xa_erase(&tctx->xa, index);
9972 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00009973 return;
Jens Axboe0f212202020-09-13 13:09:39 -06009974
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009975 WARN_ON_ONCE(current != node->task);
9976 WARN_ON_ONCE(list_empty(&node->ctx_node));
9977
9978 mutex_lock(&node->ctx->uring_lock);
9979 list_del(&node->ctx_node);
9980 mutex_unlock(&node->ctx->uring_lock);
9981
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009982 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009983 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009984 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06009985}
9986
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009987static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009988{
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009989 struct io_wq *wq = tctx->io_wq;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009990 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009991 unsigned long index;
9992
Jens Axboe8bab4c02021-09-24 07:12:27 -06009993 xa_for_each(&tctx->xa, index, node) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009994 io_uring_del_tctx_node(index);
Jens Axboe8bab4c02021-09-24 07:12:27 -06009995 cond_resched();
9996 }
Marco Elverb16ef422021-05-27 11:25:48 +02009997 if (wq) {
9998 /*
9999		 * Must be after io_uring_del_tctx_node() (removes nodes under
10000		 * uring_lock) to avoid a race with io_uring_try_cancel_iowq().
10001 */
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +010010002 io_wq_put_and_exit(wq);
Pavel Begunkovdadebc32021-08-23 13:30:44 +010010003 tctx->io_wq = NULL;
Marco Elverb16ef422021-05-27 11:25:48 +020010004 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +000010005}
10006
Pavel Begunkov3f48cf12021-04-11 01:46:27 +010010007static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov521d6a72021-03-11 23:29:38 +000010008{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +010010009 if (tracked)
10010 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +000010011 return percpu_counter_sum(&tctx->inflight);
10012}
10013
Pavel Begunkov78cc6872021-06-14 02:36:23 +010010014/*
10015 * Find any io_uring ctx that this task has registered or done IO on, and cancel
Jens Axboe8e129762021-12-09 08:54:29 -070010016 * requests. @sqd must be non-NULL iff this is an SQPOLL thread cancellation.
Pavel Begunkov78cc6872021-06-14 02:36:23 +010010017 */
10018static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +000010019{
Pavel Begunkov521d6a72021-03-11 23:29:38 +000010020 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov734551d2021-04-18 14:52:09 +010010021 struct io_ring_ctx *ctx;
Jens Axboefdaf0832020-10-30 09:37:30 -060010022 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +000010023 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -060010024
Pavel Begunkov78cc6872021-06-14 02:36:23 +010010025 WARN_ON_ONCE(sqd && sqd->thread != current);
10026
Palash Oswal6d042ff2021-04-27 18:21:49 +053010027 if (!current->io_uring)
10028 return;
Pavel Begunkov17a91052021-05-23 15:48:39 +010010029 if (tctx->io_wq)
10030 io_wq_exit_start(tctx->io_wq);
10031
Jens Axboefdaf0832020-10-30 09:37:30 -060010032 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -060010033 do {
Pavel Begunkove9dbe222021-08-09 13:04:20 +010010034 io_uring_drop_tctx_refs(current);
Jens Axboe0f212202020-09-13 13:09:39 -060010035		/* read completions before cancellations */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +010010036 inflight = tctx_inflight(tctx, !cancel_all);
Jens Axboed8a6df12020-10-15 16:24:45 -060010037 if (!inflight)
10038 break;
Jens Axboe0f212202020-09-13 13:09:39 -060010039
Pavel Begunkov78cc6872021-06-14 02:36:23 +010010040 if (!sqd) {
10041 struct io_tctx_node *node;
10042 unsigned long index;
10043
10044 xa_for_each(&tctx->xa, index, node) {
10045 /* sqpoll task will cancel all its requests */
10046 if (node->ctx->sq_data)
10047 continue;
10048 io_uring_try_cancel_requests(node->ctx, current,
10049 cancel_all);
10050 }
10051 } else {
10052 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
10053 io_uring_try_cancel_requests(ctx, current,
10054 cancel_all);
10055 }
10056
Jens Axboe8e129762021-12-09 08:54:29 -070010057 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
10058 io_run_task_work();
Pavel Begunkove9dbe222021-08-09 13:04:20 +010010059 io_uring_drop_tctx_refs(current);
Jens Axboe8e129762021-12-09 08:54:29 -070010060
Jens Axboe0f212202020-09-13 13:09:39 -060010061 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +000010062 * If we've seen completions, retry without waiting. This
10063 * avoids a race where a completion comes in before we did
10064 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -060010065 */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +010010066 if (inflight == tctx_inflight(tctx, !cancel_all))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +000010067 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +000010068 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -060010069 } while (1);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +000010070
Pavel Begunkov8452d4a2021-02-27 11:16:46 +000010071 io_uring_clean_tctx(tctx);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +010010072 if (cancel_all) {
Pavel Begunkovb168b1a2022-01-09 00:53:22 +000010073 /*
10074 * We shouldn't run task_works after cancel, so just leave
10075 * ->in_idle set for normal exit.
10076 */
10077 atomic_dec(&tctx->in_idle);
Pavel Begunkov3f48cf12021-04-11 01:46:27 +010010078		/* for exec, all of current's requests should be gone, kill tctx */
10079 __io_uring_free(current);
10080 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +030010081}
10082
Hao Xuf552a272021-08-12 12:14:35 +080010083void __io_uring_cancel(bool cancel_all)
Pavel Begunkov78cc6872021-06-14 02:36:23 +010010084{
Hao Xuf552a272021-08-12 12:14:35 +080010085 io_uring_cancel_generic(cancel_all, NULL);
Pavel Begunkov78cc6872021-06-14 02:36:23 +010010086}
10087
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010088static void *io_uring_validate_mmap_request(struct file *file,
10089 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010090{
Jens Axboe2b188cc2019-01-07 10:46:33 -070010091 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010092 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010093 struct page *page;
10094 void *ptr;
10095
10096 switch (offset) {
10097 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +000010098 case IORING_OFF_CQ_RING:
10099 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010100 break;
10101 case IORING_OFF_SQES:
10102 ptr = ctx->sq_sqes;
10103 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010104 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010105 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010106 }
10107
10108 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -070010109 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010110 return ERR_PTR(-EINVAL);
10111
10112 return ptr;
10113}
10114
10115#ifdef CONFIG_MMU
10116
10117static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
10118{
10119 size_t sz = vma->vm_end - vma->vm_start;
10120 unsigned long pfn;
10121 void *ptr;
10122
10123 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
10124 if (IS_ERR(ptr))
10125 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010126
10127 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
10128 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
10129}
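
/*
 * Userspace counterpart (illustrative sketch): the rings are mapped
 * using the magic offsets validated above, e.g. for the SQ ring:
 *
 *	void *sq = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, ring_fd,
 *			IORING_OFF_SQ_RING);
 *
 * where sq_ring_sz is computed from the io_sqring_offsets that
 * io_uring_setup(2) filled in.
 */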
10130
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010131#else /* !CONFIG_MMU */
10132
10133static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
10134{
10135 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
10136}
10137
10138static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
10139{
10140 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
10141}
10142
10143static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
10144 unsigned long addr, unsigned long len,
10145 unsigned long pgoff, unsigned long flags)
10146{
10147 void *ptr;
10148
10149 ptr = io_uring_validate_mmap_request(file, pgoff, len);
10150 if (IS_ERR(ptr))
10151 return PTR_ERR(ptr);
10152
10153 return (unsigned long) ptr;
10154}
10155
10156#endif /* !CONFIG_MMU */
10157
Pavel Begunkovd9d05212021-01-08 20:57:25 +000010158static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -060010159{
10160 DEFINE_WAIT(wait);
10161
10162 do {
10163 if (!io_sqring_full(ctx))
10164 break;
Jens Axboe90554202020-09-03 12:12:41 -060010165 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
10166
10167 if (!io_sqring_full(ctx))
10168 break;
Jens Axboe90554202020-09-03 12:12:41 -060010169 schedule();
10170 } while (!signal_pending(current));
10171
10172 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +080010173 return 0;
Jens Axboe90554202020-09-03 12:12:41 -060010174}
10175
Hao Xuc73ebb62020-11-03 10:54:37 +080010176static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
10177 struct __kernel_timespec __user **ts,
10178 const sigset_t __user **sig)
10179{
10180 struct io_uring_getevents_arg arg;
10181
10182 /*
10183 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
10184 * is just a pointer to the sigset_t.
10185 */
10186 if (!(flags & IORING_ENTER_EXT_ARG)) {
10187 *sig = (const sigset_t __user *) argp;
10188 *ts = NULL;
10189 return 0;
10190 }
10191
10192 /*
10193 * EXT_ARG is set - ensure we agree on the size of it and copy in our
10194 * timespec and sigset_t pointers if good.
10195 */
10196 if (*argsz != sizeof(arg))
10197 return -EINVAL;
10198 if (copy_from_user(&arg, argp, sizeof(arg)))
10199 return -EFAULT;
Dylan Yudaken99475482022-04-12 09:30:42 -070010200 if (arg.pad)
10201 return -EINVAL;
Hao Xuc73ebb62020-11-03 10:54:37 +080010202 *sig = u64_to_user_ptr(arg.sigmask);
10203 *argsz = arg.sigmask_sz;
10204 *ts = u64_to_user_ptr(arg.ts);
10205 return 0;
10206}
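
/*
 * Userspace counterpart (illustrative sketch, with "mask" an
 * application-defined sigset_t): with IORING_ENTER_EXT_ARG set, argp
 * carries the struct rather than a bare sigset_t, e.g.:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(unsigned long) &mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(unsigned long) &ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */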
10207
Jens Axboe2b188cc2019-01-07 10:46:33 -070010208SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +080010209 u32, min_complete, u32, flags, const void __user *, argp,
10210 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010211{
10212 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010213 int submitted = 0;
10214 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +000010215 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010216
Jens Axboe4c6e2772020-07-01 11:29:10 -060010217 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -070010218
Pavel Begunkov33f993d2021-03-19 17:22:30 +000010219 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
10220 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -070010221 return -EINVAL;
10222
10223 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +000010224 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -070010225 return -EBADF;
10226
10227 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +000010228 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -070010229 goto out_fput;
10230
10231 ret = -ENXIO;
10232 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +000010233 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -070010234 goto out_fput;
10235
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010236 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +000010237 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010238 goto out;
10239
Jens Axboe6c271ce2019-01-10 11:22:30 -070010240 /*
10241 * For SQ polling, the thread will do all submissions and completions.
10242 * Just return the requested submit count, and wake the thread if
10243 * we were asked to.
10244 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -060010245 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -070010246 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov90f67362021-08-09 20:18:12 +010010247 io_cqring_overflow_flush(ctx);
Pavel Begunkov89448c42020-12-17 00:24:39 +000010248
Jens Axboe21f96522021-08-14 09:04:40 -060010249 if (unlikely(ctx->sq_data->thread == NULL)) {
10250 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +010010251 goto out;
Jens Axboe21f96522021-08-14 09:04:40 -060010252 }
Jens Axboe6c271ce2019-01-10 11:22:30 -070010253 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -060010254 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +000010255 if (flags & IORING_ENTER_SQ_WAIT) {
10256 ret = io_sqpoll_wait_sq(ctx);
10257 if (ret)
10258 goto out;
10259 }
Jens Axboe6c271ce2019-01-10 11:22:30 -070010260 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -060010261 } else if (to_submit) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +010010262 ret = io_uring_add_tctx_node(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -060010263 if (unlikely(ret))
10264 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010265 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -060010266 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010267 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +030010268
10269 if (submitted != to_submit)
10270 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010271 }
10272 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +080010273 const sigset_t __user *sig;
10274 struct __kernel_timespec __user *ts;
10275
10276 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
10277 if (unlikely(ret))
10278 goto out;
10279
Jens Axboe2b188cc2019-01-07 10:46:33 -070010280 min_complete = min(min_complete, ctx->cq_entries);
10281
Xiaoguang Wang32b22442020-03-11 09:26:09 +080010282 /*
10283		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
10284		 * space applications don't need to poll for completion events
10285		 * themselves; they can rely on io_sq_thread to do the polling
10286		 * work, which reduces CPU usage and uring_lock contention.
10287 */
10288 if (ctx->flags & IORING_SETUP_IOPOLL &&
10289 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +030010290 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -070010291 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +080010292 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -070010293 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010294 }
10295
Pavel Begunkov7c504e652019-12-18 19:53:45 +030010296out:
Pavel Begunkov6805b322019-10-08 02:18:42 +030010297 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010298out_fput:
10299 fdput(f);
10300 return submitted ? submitted : ret;
10301}
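
/*
 * Typical call patterns (illustrative): a default ring submits and
 * waits in one trip, e.g.:
 *
 *	syscall(__NR_io_uring_enter, ring_fd, n, 1,
 *		IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * while an IORING_SETUP_SQPOLL ring normally only calls in with
 * IORING_ENTER_SQ_WAKEUP once it sees IORING_SQ_NEED_WAKEUP set in
 * the SQ ring flags.
 */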
10302
Tobias Klauserbebdb652020-02-26 18:38:32 +010010303#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010304static int io_uring_show_cred(struct seq_file *m, unsigned int id,
10305 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -070010306{
Jens Axboe87ce9552020-01-30 08:25:34 -070010307 struct user_namespace *uns = seq_user_ns(m);
10308 struct group_info *gi;
10309 kernel_cap_t cap;
10310 unsigned __capi;
10311 int g;
10312
10313 seq_printf(m, "%5d\n", id);
10314 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
10315 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
10316 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
10317 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
10318 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
10319 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
10320 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
10321 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
10322 seq_puts(m, "\n\tGroups:\t");
10323 gi = cred->group_info;
10324 for (g = 0; g < gi->ngroups; g++) {
10325 seq_put_decimal_ull(m, g ? " " : "",
10326 from_kgid_munged(uns, gi->gid[g]));
10327 }
10328 seq_puts(m, "\n\tCapEff:\t");
10329 cap = cred->cap_effective;
10330 CAP_FOR_EACH_U32(__capi)
10331 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
10332 seq_putc(m, '\n');
10333 return 0;
10334}
10335
10336static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
10337{
Joseph Qidbbe9c62020-09-29 09:01:22 -060010338 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -060010339 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -070010340 int i;
10341
Jens Axboefad8e0d2020-09-28 08:57:48 -060010342 /*
10343	 * Avoid an ABBA deadlock between the seq lock and the io_uring mutex,
10344	 * since the fdinfo case grabs it in the opposite direction of normal use
10345 * cases. If we fail to get the lock, we just don't iterate any
10346 * structures that could be going away outside the io_uring mutex.
10347 */
10348 has_lock = mutex_trylock(&ctx->uring_lock);
10349
Jens Axboe5f3f26f2021-02-25 10:17:46 -070010350 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -060010351 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -070010352 if (!sq->thread)
10353 sq = NULL;
10354 }
Joseph Qidbbe9c62020-09-29 09:01:22 -060010355
10356 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
10357 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -070010358 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -060010359 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -070010360 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -070010361
Jens Axboe87ce9552020-01-30 08:25:34 -070010362 if (f)
10363 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
10364 else
10365 seq_printf(m, "%5u: <none>\n", i);
10366 }
10367 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -060010368 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +010010369 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +010010370 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -070010371
Pavel Begunkov4751f532021-04-01 15:43:55 +010010372 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -070010373 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010374 if (has_lock && !xa_empty(&ctx->personalities)) {
10375 unsigned long index;
10376 const struct cred *cred;
10377
Jens Axboe87ce9552020-01-30 08:25:34 -070010378 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +000010379 xa_for_each(&ctx->personalities, index, cred)
10380 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -070010381 }
Jens Axboed7718a92020-02-14 22:23:12 -070010382 seq_printf(m, "PollList:\n");
Jens Axboe79ebeae2021-08-10 15:18:27 -060010383 spin_lock(&ctx->completion_lock);
Jens Axboed7718a92020-02-14 22:23:12 -070010384 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
10385 struct hlist_head *list = &ctx->cancel_hash[i];
10386 struct io_kiocb *req;
10387
10388 hlist_for_each_entry(req, list, hash_node)
10389 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
10390 req->task->task_works != NULL);
10391 }
Jens Axboe79ebeae2021-08-10 15:18:27 -060010392 spin_unlock(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -060010393 if (has_lock)
10394 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -070010395}
10396
10397static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
10398{
10399 struct io_ring_ctx *ctx = f->private_data;
10400
10401 if (percpu_ref_tryget(&ctx->refs)) {
10402 __io_uring_show_fdinfo(ctx, m);
10403 percpu_ref_put(&ctx->refs);
10404 }
10405}
Tobias Klauserbebdb652020-02-26 18:38:32 +010010406#endif
Jens Axboe87ce9552020-01-30 08:25:34 -070010407
Jens Axboe2b188cc2019-01-07 10:46:33 -070010408static const struct file_operations io_uring_fops = {
10409 .release = io_uring_release,
10410 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +010010411#ifndef CONFIG_MMU
10412 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
10413 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
10414#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -070010415 .poll = io_uring_poll,
Tobias Klauserbebdb652020-02-26 18:38:32 +010010416#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -070010417 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +010010418#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -070010419};
10420
10421static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
10422 struct io_uring_params *p)
10423{
Hristo Venev75b28af2019-08-26 17:23:46 +000010424 struct io_rings *rings;
10425 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010426
Jens Axboebd740482020-08-05 12:58:23 -060010427 /* make sure these are sane, as we already accounted them */
10428 ctx->sq_entries = p->sq_entries;
10429 ctx->cq_entries = p->cq_entries;
10430
Hristo Venev75b28af2019-08-26 17:23:46 +000010431 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
10432 if (size == SIZE_MAX)
10433 return -EOVERFLOW;
10434
10435 rings = io_mem_alloc(size);
10436 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010437 return -ENOMEM;
10438
Hristo Venev75b28af2019-08-26 17:23:46 +000010439 ctx->rings = rings;
10440 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
10441 rings->sq_ring_mask = p->sq_entries - 1;
10442 rings->cq_ring_mask = p->cq_entries - 1;
10443 rings->sq_ring_entries = p->sq_entries;
10444 rings->cq_ring_entries = p->cq_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010445
10446 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -070010447 if (size == SIZE_MAX) {
10448 io_mem_free(ctx->rings);
10449 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010450 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -070010451 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010452
10453 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -070010454 if (!ctx->sq_sqes) {
10455 io_mem_free(ctx->rings);
10456 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010457 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -070010458 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010459
Jens Axboe2b188cc2019-01-07 10:46:33 -070010460 return 0;
10461}
10462
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010463static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
10464{
10465 int ret, fd;
10466
10467 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
10468 if (fd < 0)
10469 return fd;
10470
Pavel Begunkoveef51da2021-06-14 02:36:15 +010010471 ret = io_uring_add_tctx_node(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010472 if (ret) {
10473 put_unused_fd(fd);
10474 return ret;
10475 }
10476 fd_install(fd, file);
10477 return fd;
10478}
10479
Jens Axboe2b188cc2019-01-07 10:46:33 -070010480/*
10481 * Allocate an anonymous fd; this is the application-visible backing
10482 * of an io_uring instance. The application mmaps this
10483 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
10484 * we have to tie this fd to a socket for file garbage collection purposes.
10485 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010486static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010487{
10488 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010489#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010490 int ret;
10491
Jens Axboe2b188cc2019-01-07 10:46:33 -070010492 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
10493 &ctx->ring_sock);
10494 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010495 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010496#endif
10497
Jens Axboe2b188cc2019-01-07 10:46:33 -070010498 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
10499 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010500#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -070010501 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010502 sock_release(ctx->ring_sock);
10503 ctx->ring_sock = NULL;
10504 } else {
10505 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010506 }
Jens Axboe2b188cc2019-01-07 10:46:33 -070010507#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +000010508 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -070010509}
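
/*
 * Illustrative userspace sketch (not part of this file; the raw
 * syscall(2) wrapper is an assumption, since libc may not provide one,
 * and error handling is elided): once io_uring_setup() has returned this
 * fd, an application maps the rings roughly as liburing does.
 *
 *	struct io_uring_params p = { 0 };
 *	int fd = syscall(__NR_io_uring_setup, 128, &p);
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL,
 *			p.sq_entries * sizeof(struct io_uring_sqe),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP, the CQ ring shares the SQ ring mapping;
 * otherwise it is mapped separately at IORING_OFF_CQ_RING.
 */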

static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;
	ctx->compat = in_compat_syscall();
	if (!capable(CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;
	/* always set a rsrc node */
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		goto err;
	io_rsrc_node_switch(ctx, NULL);

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
		/* fput will clean it up */
		fput(file);
		return ret;
	}

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
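
/*
 * Worked example of the sizing above: a setup request for 100 entries
 * yields p->sq_entries = 128 (rounded up to a power of two) and, without
 * IORING_SETUP_CQSIZE, p->cq_entries = 256. With IORING_SETUP_CQSIZE and
 * a requested cq_entries of 100, the CQ ring becomes 128 instead; a
 * requested CQ size that rounds up to less than the SQ size fails with
 * -EINVAL.
 */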

/*
 * Sets up an io_uring context, and returns the fd. Applications ask for a
 * ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
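
/*
 * Illustrative userspace sketch (assumed raw syscall wrapper, error
 * handling elided): with IORING_SETUP_CLAMP, an oversized request is
 * clamped rather than rejected, and the granted geometry comes back in
 * the params struct.
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_CLAMP };
 *	int fd = syscall(__NR_io_uring_setup, 1U << 20, &p);
 *	// p.sq_entries/p.cq_entries now hold the clamped ring sizes,
 *	// and p.features advertises e.g. IORING_FEAT_SINGLE_MMAP
 */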

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
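
/*
 * Illustrative userspace sketch for the probe above (assumed syscall
 * wrapper, allocation checks elided): ask the kernel which opcodes it
 * supports before relying on newer ones.
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	if (probe->ops[IORING_OP_OPENAT].flags & IO_URING_OP_SUPPORTED)
 *		have_openat = true;
 */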

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}

static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}
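
/*
 * Illustrative flow for the two helpers above (userspace sketch, assumed
 * syscall wrapper, error handling elided): create the ring disabled,
 * restrict it to a set of SQE opcodes, then enable it. Once enabled,
 * further register opcodes are limited to whatever was whitelisted via
 * IORING_RESTRICTION_REGISTER_OP.
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_R_DISABLED };
 *	int fd = syscall(__NR_io_uring_setup, 8, &p);
 *	struct io_uring_restriction res = {
 *		.opcode = IORING_RESTRICTION_SQE_OP,
 *		.sqe_op = IORING_OP_NOP,
 *	};
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_RESTRICTIONS,
 *		&res, 1);
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_ENABLE_RINGS,
 *		NULL, 0);
 */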

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
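
/*
 * Illustrative userspace sketch for the legacy update path above (assumed
 * syscall wrapper; ring_fd and new_fd are placeholders): swap the file in
 * slot 3 of a registered file table. An fd of -1 in the array clears the
 * slot instead.
 *
 *	int fds[1] = { new_fd };
 *	struct io_uring_files_update up = {
 *		.offset = 3,
 *		.fds = (__u64)(uintptr_t)fds,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES_UPDATE,
 *		&up, 1);
 */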

static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv || rr.resv2)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}
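
/*
 * Illustrative userspace sketch for the v2 interface above (assumed
 * syscall wrapper; the buffers and lengths are placeholders): register
 * two fixed buffers with per-buffer tags. Note that nr_args carries the
 * struct size here, not a count.
 *
 *	struct iovec iovs[2] = { { buf0, len0 }, { buf1, len1 } };
 *	__u64 tags[2] = { 1, 2 };
 *	struct io_uring_rsrc_register rr = {
 *		.nr = 2,
 *		.data = (__u64)(uintptr_t)iovs,
 *		.tags = (__u64)(uintptr_t)tags,
 *	};
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_BUFFERS2,
 *		&rr, sizeof(rr));
 */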

static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
				unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}
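
/*
 * Illustrative userspace sketch for the helper above (assumed syscall
 * wrapper): pin this task's io-wq workers to CPUs 0 and 1. cpu_set_t has
 * the native bitmap layout; the compat branch above handles 32-bit
 * callers.
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */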

static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}

static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	ret = -EINVAL;
	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
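
/*
 * Illustrative userspace sketch for the helper above (assumed syscall
 * wrapper): cap this context at 8 bounded and 2 unbounded io-wq workers.
 * A zero entry leaves that limit untouched, and the previous limits are
 * copied back into the array on return.
 *
 *	__u32 counts[2] = { 8, 2 };	// { bounded, unbounded }
 *
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_IOWQ_MAX_WORKERS,
 *		counts, 2);
 *	// counts[] now holds the old limits
 */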

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_REGISTER_BUFFERS:
	case IORING_UNREGISTER_BUFFERS:
	case IORING_REGISTER_FILES:
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
	case IORING_REGISTER_FILES2:
	case IORING_REGISTER_FILES_UPDATE2:
	case IORING_REGISTER_BUFFERS2:
	case IORING_REGISTER_BUFFERS_UPDATE:
	case IORING_REGISTER_IOWQ_AFF:
	case IORING_UNREGISTER_IOWQ_AFF:
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		return false;
	default:
		return true;
	}
}

static int io_ctx_quiesce(struct io_ring_ctx *ctx)
{
	long ret;

	percpu_ref_kill(&ctx->refs);

	/*
	 * Drop uring mutex before waiting for references to exit. If another
	 * thread is currently inside io_uring_enter() it might need to grab the
	 * uring_lock to make progress. If we hold it here across the drain
	 * wait, then we can deadlock. It's safe to drop the mutex here, since
	 * no new references will come in after we've killed the percpu ref.
	 */
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = wait_for_completion_interruptible(&ctx->ref_comp);
		if (!ret)
			break;
		ret = io_run_task_work_sig();
	} while (ret >= 0);
	mutex_lock(&ctx->uring_lock);

	if (ret)
		io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
	return ret;
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	if (io_register_op_must_quiesce(opcode)) {
		ret = io_ctx_quiesce(ctx);
		if (ret)
			return ret;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
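
/*
 * Illustrative userspace sketch for the syscall above (assumed syscall
 * wrapper, error handling elided): register an eventfd so completions can
 * be consumed through poll/epoll alongside other fds.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 */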

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);