// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs an smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * on data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
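
/*
 * Illustrative sketch of the application-side counterpart to the barrier
 * rules above, in the style of liburing. The io_uring_sq/io_uring_cq field
 * names (khead, ktail, kring_mask, kflags, array, cqes) follow liburing
 * conventions and are assumptions here, not definitions from this file.
 *
 *	// reap completions: acquire-load the tail the kernel publishes
 *	unsigned head = *cq->khead;
 *	unsigned tail = smp_load_acquire(cq->ktail);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	// order the CQE loads before making the new head visible to the kernel
 *	smp_store_release(cq->khead, head);
 *
 *	// submit: order SQE/array stores before the tail store
 *	unsigned stail = *sq->ktail;
 *	sq->array[stail & *sq->kring_mask] = sqe_index;
 *	smp_store_release(sq->ktail, stail + 1);
 *	// with IORING_SETUP_SQPOLL, a full smp_mb() is needed here before
 *	// testing *sq->kflags for IORING_SQ_NEED_WAKEUP
 */
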
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/tracehook.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/* 512 entries per page on 64-bit archs, 64 pages max */
#define IORING_MAX_FIXED_FILES	(1U << 15)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

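/*
 * Illustrative sketch (not kernel code) of how an application could map
 * struct io_rings and locate its fields after io_uring_setup(2). The raw
 * syscall() usage and variable names are assumptions for illustration; the
 * offsets come from the io_sqring_offsets the kernel fills in, as noted above.
 *
 *	struct io_uring_params p = { };
 *	int fd = syscall(__NR_io_uring_setup, entries, &p);
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	char *sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_head  = (unsigned *)(sq_ptr + p.sq_off.head);
 *	unsigned *sq_tail  = (unsigned *)(sq_ptr + p.sq_off.tail);
 *	unsigned *sq_mask  = (unsigned *)(sq_ptr + p.sq_off.ring_mask);
 *	unsigned *sq_array = (unsigned *)(sq_ptr + p.sq_off.array);
 */
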
enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	struct io_fixed_file *files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		struct fasync_struct	*cq_fasync;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket		*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash		*hash_map;

		/* Only used for accounting purposes */
		struct user_struct		*user;
		struct mm_struct		*mm_account;

		/* ctx exit and cancelation */
		struct llist_head		fallback_llist;
		struct delayed_work		fallback_work;
		struct work_struct		exit_work;
		struct list_head		tctx_list;
		struct completion		ref_comp;
	};
};

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_NOWAIT_READ_BIT,
	REQ_F_NOWAIT_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_NOWAIT_READ	= BIT(REQ_F_NOWAIT_READ_BIT),
	/* supports async writes */
	REQ_F_NOWAIT_WRITE	= BIT(REQ_F_NOWAIT_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};

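/*
 * Illustrative sketch of how this table is meant to be consulted; the actual
 * dispatch code appears later in this file, and the variable names here are
 * for illustration only:
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->not_supported)
 *		return -EOPNOTSUPP;
 *	if (def->needs_async_setup)
 *		// allocate def->async_size bytes for req->async_data
 *	if (def->needs_file)
 *		// resolve sqe->fd into req->file before issuing
 */
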
static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_ring_ctx *ctx,
				struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static bool io_poll_remove_waitqs(struct io_kiocb *req);
static int io_req_prep_async(struct io_kiocb *req);

static void io_fallback_req_func(struct work_struct *unused);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	/* already at zero, wait for ->release() */
	if (!got)
		wait_for_completion(compl);
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries; that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
		goto err;
	/* set invalid range, so io_import_fixed() fails meeting it */
	ctx->dummy_ubuf->ubuf = -1UL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->poll_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
	INIT_LIST_HEAD(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	return ctx;
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001214static void io_account_cq_overflow(struct io_ring_ctx *ctx)
1215{
1216 struct io_rings *r = ctx->rings;
1217
1218 WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
1219 ctx->cq_extra--;
1220}
1221
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001222static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001223{
Jens Axboe2bc99302020-07-09 09:43:27 -06001224 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1225 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001226
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001227 return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
Jens Axboe2bc99302020-07-09 09:43:27 -06001228 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001229
Bob Liu9d858b22019-11-13 18:06:25 +08001230 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001231}
1232
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01001233#define FFS_ASYNC_READ 0x1UL
1234#define FFS_ASYNC_WRITE 0x2UL
1235#ifdef CONFIG_64BIT
1236#define FFS_ISREG 0x4UL
1237#else
1238#define FFS_ISREG 0x0UL
1239#endif
1240#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
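/*
 * These flag bits are packed into the otherwise-unused low bits of the
 * struct file pointer kept in the fixed file table; pointer alignment
 * guarantees those bits are clear in the real pointer, and FFS_MASK
 * recovers it. On 32-bit only two low bits are guaranteed free, hence
 * FFS_ISREG is not packed there (see the #ifdef above).
 */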
1241
1242static inline bool io_req_ffs_set(struct io_kiocb *req)
1243{
1244 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE);
1245}
1246
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001247static void io_req_track_inflight(struct io_kiocb *req)
1248{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001249 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001250 req->flags |= REQ_F_INFLIGHT;
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001251 atomic_inc(&current->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001252 }
1253}
1254
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001255static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001256{
Jens Axboed3656342019-12-18 09:50:26 -07001257 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001258 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001259
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001260 if (!(req->flags & REQ_F_CREDS)) {
1261 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001262 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001263 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001264
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001265 req->work.list.next = NULL;
1266 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001267 if (req->flags & REQ_F_FORCE_ASYNC)
1268 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1269
Jens Axboed3656342019-12-18 09:50:26 -07001270 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001271 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001272 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001273 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001274 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001275 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001276 }
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001277
1278 switch (req->opcode) {
1279 case IORING_OP_SPLICE:
1280 case IORING_OP_TEE:
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001281 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1282 req->work.flags |= IO_WQ_WORK_UNBOUND;
1283 break;
1284 }
Jens Axboe561fb042019-10-24 07:25:42 -06001285}
1286
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001287static void io_prep_async_link(struct io_kiocb *req)
1288{
1289 struct io_kiocb *cur;
1290
Pavel Begunkov44eff402021-07-26 14:14:31 +01001291 if (req->flags & REQ_F_LINK_TIMEOUT) {
1292 struct io_ring_ctx *ctx = req->ctx;
1293
1294 spin_lock_irq(&ctx->completion_lock);
1295 io_for_each_link(cur, req)
1296 io_prep_async_work(cur);
1297 spin_unlock_irq(&ctx->completion_lock);
1298 } else {
1299 io_for_each_link(cur, req)
1300 io_prep_async_work(cur);
1301 }
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001302}
1303
Pavel Begunkovebf93662021-03-01 18:20:47 +00001304static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001305{
Jackie Liua197f662019-11-08 08:09:12 -07001306 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001307 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001308 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001309
Jens Axboe3bfe6102021-02-16 14:15:30 -07001310 BUG_ON(!tctx);
1311 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001312
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001313 /* init ->work of the whole link before punting */
1314 io_prep_async_link(req);
Jens Axboe991468d2021-07-23 11:53:54 -06001315
1316 /*
1317 * Not expected to happen, but if we do have a bug where this _can_
1318 * happen, catch it here and ensure the request is marked as
1319 * canceled. That will make io-wq go through the usual work cancel
1320 * procedure rather than attempt to run this request (or create a new
1321 * worker for it).
1322 */
1323 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1324 req->work.flags |= IO_WQ_WORK_CANCEL;
1325
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001326 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1327 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001328 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001329 if (link)
1330 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001331}
1332
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001333static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001334 __must_hold(&req->ctx->completion_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001335{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001336 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001337
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001338 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001339 atomic_set(&req->ctx->cq_timeouts,
1340 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001341 list_del_init(&req->timeout.list);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001342 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001343 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001344 }
1345}
1346
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001347static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001348{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001349 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001350 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1351 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001352
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001353 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001354 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001355 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001356 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001357 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001358 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001359}
1360
Pavel Begunkov360428f2020-05-30 14:54:17 +03001361static void io_flush_timeouts(struct io_ring_ctx *ctx)
1362{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001363 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001364
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001365 while (!list_empty(&ctx->timeout_list)) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001366 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001367 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001368 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001369
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001370 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001371 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001372
1373 /*
1374 * Since seq can easily wrap around over time, subtract
1375 * the last seq at which timeouts were flushed before comparing.
1376 * Assuming not more than 2^31-1 events have happened since,
1377 * these subtractions won't have wrapped, so we can check if
1378 * target is in [last_seq, current_seq] by comparing the two.
1379 */
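		/*
		 * e.g. cq_last_tm_flush == U32_MAX - 10, target_seq == 5,
		 * seq == 2: events_needed == 16, events_got == 13, so the
		 * timeout has not fired yet and stays queued.
		 */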
1380 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1381 events_got = seq - ctx->cq_last_tm_flush;
1382 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001383 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001384
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001385 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001386 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001387 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001388 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001389}
1390
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001391static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001392{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001393 if (ctx->off_timeout_used)
1394 io_flush_timeouts(ctx);
1395 if (ctx->drain_active)
1396 io_queue_deferred(ctx);
1397}
1398
1399static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1400{
1401 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1402 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001403 /* order cqe stores with ring update */
1404 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001405}
1406
Jens Axboe90554202020-09-03 12:12:41 -06001407static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1408{
1409 struct io_rings *r = ctx->rings;
1410
Pavel Begunkova566c552021-05-16 22:58:08 +01001411 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001412}
1413
Pavel Begunkov888aae22021-01-19 13:32:39 +00001414static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1415{
1416 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1417}
1418
Pavel Begunkovd068b502021-05-16 22:58:11 +01001419static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001420{
Hristo Venev75b28af2019-08-26 17:23:46 +00001421 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001422 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001423
Stefan Bühler115e12e2019-04-24 23:54:18 +02001424 /*
1425 * writes to the cq entry need to come after reading head; the
1426 * control dependency is enough as we're using WRITE_ONCE to
1427 * fill the cq entry
1428 */
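	/* the CQ head load referred to above happens in __io_cqring_events() just below */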
Pavel Begunkova566c552021-05-16 22:58:08 +01001429 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001430 return NULL;
1431
Pavel Begunkov888aae22021-01-19 13:32:39 +00001432 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001433 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001434}
1435
Jens Axboef2842ab2020-01-08 11:04:00 -07001436static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1437{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001438 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001439 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001440 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1441 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001442 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001443}
1444
Jens Axboeb41e9852020-02-17 09:52:41 -07001445static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001446{
Jens Axboe5fd46172021-08-06 14:04:31 -06001447 /*
1448 * wake_up_all() may seem excessive, but io_wake_function() and
1449 * io_should_wake() handle the termination of the loop and only
1450 * wake as many waiters as we need to.
1451 */
1452 if (wq_has_sleeper(&ctx->cq_wait))
1453 wake_up_all(&ctx->cq_wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001454 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1455 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001456 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001457 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001458 if (waitqueue_active(&ctx->poll_wait)) {
1459 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001460 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1461 }
Jens Axboe8c838782019-03-12 15:48:16 -06001462}
1463
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001464static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1465{
1466 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe5fd46172021-08-06 14:04:31 -06001467 if (wq_has_sleeper(&ctx->cq_wait))
1468 wake_up_all(&ctx->cq_wait);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001469 }
1470 if (io_should_trigger_evfd(ctx))
1471 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001472 if (waitqueue_active(&ctx->poll_wait)) {
1473 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001474 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1475 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001476}
1477
Jens Axboec4a2ed72019-11-21 21:01:26 -07001478/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001479static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001480{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001481 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001482 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001483
Pavel Begunkova566c552021-05-16 22:58:08 +01001484 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001485 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001486
Jens Axboeb18032b2021-01-24 16:58:56 -07001487 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001488 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001489 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001490 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001491 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001492
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001493 if (!cqe && !force)
1494 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001495 ocqe = list_first_entry(&ctx->cq_overflow_list,
1496 struct io_overflow_cqe, list);
1497 if (cqe)
1498 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1499 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001500 io_account_cq_overflow(ctx);
1501
Jens Axboeb18032b2021-01-24 16:58:56 -07001502 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001503 list_del(&ocqe->list);
1504 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001505 }
1506
Pavel Begunkov09e88402020-12-17 00:24:38 +00001507 all_flushed = list_empty(&ctx->cq_overflow_list);
1508 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001509 clear_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001510 WRITE_ONCE(ctx->rings->sq_flags,
1511 ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001512 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001513
Jens Axboeb18032b2021-01-24 16:58:56 -07001514 if (posted)
1515 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001516 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001517 if (posted)
1518 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001519 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001520}
1521
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001522static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001523{
Jens Axboeca0a2652021-03-04 17:15:48 -07001524 bool ret = true;
1525
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001526 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001527 /* iopoll syncs against uring_lock, not completion_lock */
1528 if (ctx->flags & IORING_SETUP_IOPOLL)
1529 mutex_lock(&ctx->uring_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001530 ret = __io_cqring_overflow_flush(ctx, force);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001531 if (ctx->flags & IORING_SETUP_IOPOLL)
1532 mutex_unlock(&ctx->uring_lock);
1533 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001534
1535 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001536}
1537
Jens Axboeabc54d62021-02-24 13:32:30 -07001538/*
1539 * Shamelessly stolen from the mm implementation of page reference checking,
1540 * see commit f958d7b528b1 for details.
1541 */
1542#define req_ref_zero_or_close_to_overflow(req) \
1543 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
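/*
 * The "+ 127u" trick catches both a refcount of zero and counts that went
 * negative after an over-put (0xffffff81..0xffffffff wrap back into 0..126)
 * with a single comparison.
 */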
1544
Jens Axboede9b4cc2021-02-24 13:28:27 -07001545static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001546{
Jens Axboeabc54d62021-02-24 13:32:30 -07001547 return atomic_inc_not_zero(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001548}
1549
1550static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
1551{
Jens Axboeabc54d62021-02-24 13:32:30 -07001552 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1553 return atomic_sub_and_test(refs, &req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001554}
1555
1556static inline bool req_ref_put_and_test(struct io_kiocb *req)
1557{
Jens Axboeabc54d62021-02-24 13:32:30 -07001558 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1559 return atomic_dec_and_test(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001560}
1561
1562static inline void req_ref_put(struct io_kiocb *req)
1563{
Jens Axboeabc54d62021-02-24 13:32:30 -07001564 WARN_ON_ONCE(req_ref_put_and_test(req));
Jens Axboede9b4cc2021-02-24 13:28:27 -07001565}
1566
1567static inline void req_ref_get(struct io_kiocb *req)
1568{
Jens Axboeabc54d62021-02-24 13:32:30 -07001569 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1570 atomic_inc(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001571}
1572
Pavel Begunkov6a290a12021-08-09 13:04:13 +01001573/* must be called shortly after putting a request */
1574static inline void io_put_task(struct task_struct *task, int nr)
1575{
1576 struct io_uring_task *tctx = task->io_uring;
1577
1578 percpu_counter_sub(&tctx->inflight, nr);
1579 if (unlikely(atomic_read(&tctx->in_idle)))
1580 wake_up(&tctx->wait);
1581 put_task_struct_many(task, nr);
1582}
1583
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001584static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1585 long res, unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001586{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001587 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001588
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001589 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1590 if (!ocqe) {
1591 /*
1592 * If we're in ring overflow flush mode, or in task cancel mode,
1593 * or cannot allocate an overflow entry, then we need to drop it
1594 * on the floor.
1595 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001596 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001597 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001598 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001599 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001600 set_bit(0, &ctx->check_cq_overflow);
Nadav Amit20c0b382021-08-07 17:13:42 -07001601 WRITE_ONCE(ctx->rings->sq_flags,
1602 ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
1603
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001604 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001605 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001606 ocqe->cqe.res = res;
1607 ocqe->cqe.flags = cflags;
1608 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1609 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001610}
1611
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001612static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1613 long res, unsigned int cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001614{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001615 struct io_uring_cqe *cqe;
1616
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001617 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001618
1619 /*
1620 * If we can't get a cq entry, userspace overflowed the
1621 * submission (by quite a lot). Increment the overflow count in
1622 * the ring.
1623 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001624 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001625 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001626 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001627 WRITE_ONCE(cqe->res, res);
1628 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001629 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001630 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001631 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001632}
1633
Pavel Begunkov8d133262021-04-11 01:46:33 +01001634/* not hot enough to be worth bloating callers by inlining */
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001635static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1636 long res, unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001637{
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001638 return __io_cqring_fill_event(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001639}
1640
Pavel Begunkov7a612352021-03-09 00:37:59 +00001641static void io_req_complete_post(struct io_kiocb *req, long res,
1642 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001643{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001644 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001645 unsigned long flags;
1646
1647 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001648 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001649 /*
1650 * If we're the last reference to this request, add to our locked
1651 * free_list cache.
1652 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001653 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001654 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001655 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
Pavel Begunkov7a612352021-03-09 00:37:59 +00001656 io_disarm_next(req);
1657 if (req->link) {
1658 io_req_task_queue(req->link);
1659 req->link = NULL;
1660 }
1661 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001662 io_dismantle_req(req);
1663 io_put_task(req->task, 1);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001664 list_add(&req->compl.list, &ctx->locked_free_list);
1665 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001666 } else {
1667 if (!percpu_ref_tryget(&ctx->refs))
1668 req = NULL;
1669 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001670 io_commit_cqring(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001671 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001672
Pavel Begunkov180f8292021-03-14 20:57:09 +00001673 if (req) {
1674 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001675 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001676 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001677}
1678
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001679static inline bool io_req_needs_clean(struct io_kiocb *req)
1680{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001681 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001682}
1683
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001684static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001685 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001686{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001687 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001688 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001689 req->result = res;
1690 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001691 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001692}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001693
Pavel Begunkov889fca72021-02-10 00:03:09 +00001694static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1695 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001696{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001697 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1698 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001699 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001700 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001701}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001702
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001703static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001704{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001705 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001706}
1707
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001708static void io_req_complete_failed(struct io_kiocb *req, long res)
1709{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001710 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001711 io_put_req(req);
1712 io_req_complete_post(req, res, 0);
1713}
1714
Pavel Begunkov864ea922021-08-09 13:04:08 +01001715/*
1716 * Don't initialise the fields below on every allocation, but do that in
1717 * advance and keep them valid across allocations.
1718 */
1719static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
1720{
1721 req->ctx = ctx;
1722 req->link = NULL;
1723 req->async_data = NULL;
1724 /* not necessary, but safer to zero */
1725 req->result = 0;
1726}
1727
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001728static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1729 struct io_comp_state *cs)
1730{
1731 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001732 list_splice_init(&ctx->locked_free_list, &cs->free_list);
1733 ctx->locked_free_nr = 0;
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001734 spin_unlock_irq(&ctx->completion_lock);
1735}
1736
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001737/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001738static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001739{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001740 struct io_submit_state *state = &ctx->submit_state;
1741 struct io_comp_state *cs = &state->comp;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001742 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001743
Jens Axboec7dae4b2021-02-09 19:53:37 -07001744 /*
1745 * If we have more than a batch's worth of requests in our IRQ side
1746 * locked cache, grab the lock and move them over to our submission
1747 * side cache.
1748 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001749 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001750 io_flush_cached_locked_reqs(ctx, cs);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001751
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001752 nr = state->free_reqs;
Jens Axboec7dae4b2021-02-09 19:53:37 -07001753 while (!list_empty(&cs->free_list)) {
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001754 struct io_kiocb *req = list_first_entry(&cs->free_list,
1755 struct io_kiocb, compl.list);
1756
Jens Axboe2b188cc2019-01-07 10:46:33 -07001757 list_del(&req->compl.list);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001758 state->reqs[nr++] = req;
1759 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001760 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001761 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001762
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001763 state->free_reqs = nr;
1764 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001765}
1766
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001767static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001768{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001769 struct io_submit_state *state = &ctx->submit_state;
Pavel Begunkov864ea922021-08-09 13:04:08 +01001770 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1771 int ret, i;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001772
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001773 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001774
Pavel Begunkov864ea922021-08-09 13:04:08 +01001775 if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
1776 goto got_req;
Jens Axboe2579f912019-01-09 09:10:43 -07001777
Pavel Begunkov864ea922021-08-09 13:04:08 +01001778 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1779 state->reqs);
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001780
Pavel Begunkov864ea922021-08-09 13:04:08 +01001781 /*
1782 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1783 * retry single alloc to be on the safe side.
1784 */
1785 if (unlikely(ret <= 0)) {
1786 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1787 if (!state->reqs[0])
1788 return NULL;
1789 ret = 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001790 }
Pavel Begunkov864ea922021-08-09 13:04:08 +01001791
1792 for (i = 0; i < ret; i++)
1793 io_preinit_req(state->reqs[i], ctx);
1794 state->free_reqs = ret;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001795got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001796 state->free_reqs--;
1797 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001798}
1799
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001800static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001801{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001802 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001803 fput(file);
1804}
1805
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001806static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001807{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001808 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001809
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01001810 if (io_req_needs_clean(req))
1811 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001812 if (!(flags & REQ_F_FIXED_FILE))
1813 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001814 if (req->fixed_rsrc_refs)
1815 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001816 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00001817 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001818 req->async_data = NULL;
1819 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001820}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001821
Pavel Begunkov216578e2020-10-13 09:44:00 +01001822static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001823{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001824 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001825
Pavel Begunkov216578e2020-10-13 09:44:00 +01001826 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001827 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001828
Pavel Begunkov3893f392021-02-10 00:03:15 +00001829 kmem_cache_free(req_cachep, req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001830 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001831}
1832
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001833static inline void io_remove_next_linked(struct io_kiocb *req)
1834{
1835 struct io_kiocb *nxt = req->link;
1836
1837 req->link = nxt->link;
1838 nxt->link = NULL;
1839}
1840
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001841static bool io_kill_linked_timeout(struct io_kiocb *req)
1842 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001843{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001844 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001845
Pavel Begunkov900fad42020-10-19 16:39:16 +01001846 /*
1847	 * Can happen if a linked timeout fired and the link chain looked like
1848 * req -> link t-out -> link t-out [-> ...]
1849 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001850 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1851 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001852
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001853 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001854 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001855 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001856 io_cqring_fill_event(link->ctx, link->user_data,
1857 -ECANCELED, 0);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001858 io_put_req_deferred(link, 1);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001859 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001860 }
1861 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001862 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001863}
1864
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001865static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001866 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001867{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001868 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001869
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001870 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001871 while (link) {
1872 nxt = link->link;
1873 link->link = NULL;
1874
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001875 trace_io_uring_fail_link(req, link);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001876 io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
Jens Axboe1575f212021-02-27 15:20:49 -07001877 io_put_req_deferred(link, 2);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001878 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001879 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001880}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001881
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001882static bool io_disarm_next(struct io_kiocb *req)
1883 __must_hold(&req->ctx->completion_lock)
1884{
1885 bool posted = false;
1886
1887 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1888 posted = io_kill_linked_timeout(req);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001889 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01001890 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001891 posted |= (req->link != NULL);
1892 io_fail_links(req);
1893 }
1894 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001895}
1896
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001897static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001898{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001899 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001900
Jens Axboe9e645e112019-05-10 16:07:28 -06001901 /*
1902 * If LINK is set, we have dependent requests in this chain. If we
1903 * didn't fail this request, queue the first one up, moving any other
1904 * dependencies to the next request. In case of failure, fail the rest
1905 * of the chain.
1906 */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001907 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001908 struct io_ring_ctx *ctx = req->ctx;
1909 unsigned long flags;
1910 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001911
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001912 spin_lock_irqsave(&ctx->completion_lock, flags);
1913 posted = io_disarm_next(req);
1914 if (posted)
1915 io_commit_cqring(req->ctx);
1916 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1917 if (posted)
1918 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001919 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001920 nxt = req->link;
1921 req->link = NULL;
1922 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001923}
Jens Axboe2665abf2019-11-05 12:40:47 -07001924
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001925static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001926{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001927 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001928 return NULL;
1929 return __io_req_find_next(req);
1930}
1931
Pavel Begunkov2c323952021-02-28 22:04:53 +00001932static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1933{
1934 if (!ctx)
1935 return;
1936 if (ctx->submit_state.comp.nr) {
1937 mutex_lock(&ctx->uring_lock);
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01001938 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001939 mutex_unlock(&ctx->uring_lock);
1940 }
1941 percpu_ref_put(&ctx->refs);
1942}
1943
Jens Axboe7cbf1722021-02-10 00:03:20 +00001944static void tctx_task_work(struct callback_head *cb)
1945{
Pavel Begunkovebd0df22021-06-17 18:14:07 +01001946 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01001947 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1948 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001949
Pavel Begunkov16f72072021-06-17 18:14:09 +01001950 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01001951 struct io_wq_work_node *node;
1952
1953 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01001954 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01001955 INIT_WQ_LIST(&tctx->task_list);
1956 spin_unlock_irq(&tctx->task_lock);
1957
Pavel Begunkov3f184072021-06-17 18:14:06 +01001958 while (node) {
1959 struct io_wq_work_node *next = node->next;
1960 struct io_kiocb *req = container_of(node, struct io_kiocb,
1961 io_task_work.node);
1962
1963 if (req->ctx != ctx) {
1964 ctx_flush_and_put(ctx);
1965 ctx = req->ctx;
1966 percpu_ref_get(&ctx->refs);
1967 }
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01001968 req->io_task_work.func(req);
Pavel Begunkov3f184072021-06-17 18:14:06 +01001969 node = next;
1970 }
Pavel Begunkov7a778f92021-06-17 18:14:10 +01001971 if (wq_list_empty(&tctx->task_list)) {
Jens Axboe110aa252021-07-26 10:42:56 -06001972 spin_lock_irq(&tctx->task_lock);
Pavel Begunkov7a778f92021-06-17 18:14:10 +01001973 clear_bit(0, &tctx->task_state);
Jens Axboe110aa252021-07-26 10:42:56 -06001974 if (wq_list_empty(&tctx->task_list)) {
1975 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov7a778f92021-06-17 18:14:10 +01001976 break;
Jens Axboe110aa252021-07-26 10:42:56 -06001977 }
1978 spin_unlock_irq(&tctx->task_lock);
Pavel Begunkov7a778f92021-06-17 18:14:10 +01001979 /* another tctx_task_work() is enqueued, yield */
1980 if (test_and_set_bit(0, &tctx->task_state))
1981 break;
1982 }
Jens Axboe7cbf1722021-02-10 00:03:20 +00001983 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01001984 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01001985
1986 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001987}
1988
Pavel Begunkove09ee512021-07-01 13:26:05 +01001989static void io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00001990{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001991 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001992 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001993 enum task_work_notify_mode notify;
Pavel Begunkove09ee512021-07-01 13:26:05 +01001994 struct io_wq_work_node *node;
Jens Axboe0b81e802021-02-16 10:33:53 -07001995 unsigned long flags;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001996
1997 WARN_ON_ONCE(!tctx);
1998
Jens Axboe0b81e802021-02-16 10:33:53 -07001999 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002000 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07002001 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002002
2003 /* task_work already pending, we're done */
2004 if (test_bit(0, &tctx->task_state) ||
2005 test_and_set_bit(0, &tctx->task_state))
Pavel Begunkove09ee512021-07-01 13:26:05 +01002006 return;
Jens Axboe7cbf1722021-02-10 00:03:20 +00002007
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002008 /*
2009 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2010 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2011 * processing task_work. There's no reliable way to tell if TWA_RESUME
2012 * will do the job.
2013 */
2014 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002015 if (!task_work_add(tsk, &tctx->task_work, notify)) {
2016 wake_up_process(tsk);
Pavel Begunkove09ee512021-07-01 13:26:05 +01002017 return;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00002018 }
Pavel Begunkov2215bed2021-08-09 13:04:06 +01002019
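	/*
	 * task_work_add() failed, most likely because the task is exiting.
	 * Pull back everything we queued and run it through the per-ctx
	 * fallback work item instead, so nothing is silently dropped.
	 */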
Jens Axboe7cbf1722021-02-10 00:03:20 +00002020 clear_bit(0, &tctx->task_state);
Pavel Begunkove09ee512021-07-01 13:26:05 +01002021 spin_lock_irqsave(&tctx->task_lock, flags);
2022 node = tctx->task_list.first;
2023 INIT_WQ_LIST(&tctx->task_list);
2024 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00002025
Pavel Begunkove09ee512021-07-01 13:26:05 +01002026 while (node) {
2027 req = container_of(node, struct io_kiocb, io_task_work.node);
2028 node = node->next;
2029 if (llist_add(&req->io_task_work.fallback_node,
2030 &req->ctx->fallback_llist))
2031 schedule_delayed_work(&req->ctx->fallback_work, 1);
2032 }
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002033}
2034
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002035static void io_req_task_cancel(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06002036{
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002037 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002038
Pavel Begunkove83acd72021-02-28 22:35:09 +00002039 /* ctx is guaranteed to stay alive while we hold uring_lock */
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002040 mutex_lock(&ctx->uring_lock);
Pavel Begunkov25935532021-03-19 17:22:40 +00002041 io_req_complete_failed(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002042 mutex_unlock(&ctx->uring_lock);
Jens Axboec40f6372020-06-25 15:39:59 -06002043}
2044
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002045static void io_req_task_submit(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06002046{
2047 struct io_ring_ctx *ctx = req->ctx;
2048
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00002049	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002050 mutex_lock(&ctx->uring_lock);
Pavel Begunkov9c688262021-07-10 02:45:59 +01002051 if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002052 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002053 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002054 io_req_complete_failed(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002055 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002056}
2057
Pavel Begunkova3df76982021-02-18 22:32:52 +00002058static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2059{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002060 req->result = ret;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002061 req->io_task_work.func = io_req_task_cancel;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002062 io_req_task_work_add(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00002063}
2064
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002065static void io_req_task_queue(struct io_kiocb *req)
2066{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002067 req->io_task_work.func = io_req_task_submit;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002068 io_req_task_work_add(req);
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002069}
2070
Jens Axboe773af692021-07-27 10:25:55 -06002071static void io_req_task_queue_reissue(struct io_kiocb *req)
2072{
2073 req->io_task_work.func = io_queue_async_work;
2074 io_req_task_work_add(req);
2075}
2076
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002077static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002078{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002079 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002080
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002081 if (nxt)
2082 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002083}
2084
Jens Axboe9e645e112019-05-10 16:07:28 -06002085static void io_free_req(struct io_kiocb *req)
2086{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002087 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002088 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002089}
2090
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002091struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002092 struct task_struct *task;
2093 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002094 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002095};
2096
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002097static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002098{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002099 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002100 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002101 rb->task = NULL;
2102}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002103
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002104static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2105 struct req_batch *rb)
2106{
Pavel Begunkov6e833d52021-02-11 18:28:20 +00002107 if (rb->task)
Pavel Begunkov7c660732021-01-25 11:42:21 +00002108 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002109 if (rb->ctx_refs)
2110 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002111}
2112
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002113static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2114 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002115{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002116 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002117 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002118
Jens Axboee3bc8e92020-09-24 08:45:57 -06002119 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002120 if (rb->task)
2121 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002122 rb->task = req->task;
2123 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002124 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002125 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002126 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002127
Pavel Begunkovbd759042021-02-12 03:23:50 +00002128 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002129 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002130 else
2131 list_add(&req->compl.list, &state->comp.free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002132}
2133
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002134static void io_submit_flush_completions(struct io_ring_ctx *ctx)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01002135	__must_hold(&ctx->uring_lock)
Pavel Begunkov905c1722021-02-10 00:03:14 +00002136{
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002137 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002138 int i, nr = cs->nr;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002139 struct req_batch rb;
2140
Pavel Begunkov905c1722021-02-10 00:03:14 +00002141 spin_lock_irq(&ctx->completion_lock);
2142 for (i = 0; i < nr; i++) {
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002143 struct io_kiocb *req = cs->reqs[i];
2144
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002145 __io_cqring_fill_event(ctx, req->user_data, req->result,
2146 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002147 }
2148 io_commit_cqring(ctx);
2149 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002150 io_cqring_ev_posted(ctx);
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002151
2152 io_init_req_batch(&rb);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002153 for (i = 0; i < nr; i++) {
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002154 struct io_kiocb *req = cs->reqs[i];
Pavel Begunkov905c1722021-02-10 00:03:14 +00002155
2156 /* submission and completion refs */
Jens Axboede9b4cc2021-02-24 13:28:27 -07002157 if (req_ref_sub_and_test(req, 2))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002158 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002159 }
2160
2161 io_req_free_batch_finish(ctx, &rb);
2162 cs->nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002163}
2164
Jens Axboeba816ad2019-09-28 11:36:45 -06002165/*
2166 * Drop reference to request, return next in chain (if there is one) if this
2167 * was the last reference to this request.
2168 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002169static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002170{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002171 struct io_kiocb *nxt = NULL;
2172
Jens Axboede9b4cc2021-02-24 13:28:27 -07002173 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002174 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002175 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002176 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002177 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002178}
2179
Pavel Begunkov0d850352021-03-19 17:22:37 +00002180static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002181{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002182 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002183 io_free_req(req);
2184}
2185
Pavel Begunkov216578e2020-10-13 09:44:00 +01002186static void io_free_req_deferred(struct io_kiocb *req)
2187{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002188 req->io_task_work.func = io_free_req;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002189 io_req_task_work_add(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002190}
2191
2192static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2193{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002194 if (req_ref_sub_and_test(req, refs))
Pavel Begunkov216578e2020-10-13 09:44:00 +01002195 io_free_req_deferred(req);
2196}
2197
Pavel Begunkov6c503152021-01-04 20:36:36 +00002198static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002199{
2200 /* See comment at the top of this file */
2201 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002202 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002203}
2204
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002205static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2206{
2207 struct io_rings *rings = ctx->rings;
2208
2209 /* make sure SQ entry isn't read before tail */
2210 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2211}
2212
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002213static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002214{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002215 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002216
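	/*
	 * The buffer ID travels back to userspace in cqe->flags: it sits in
	 * the bits above IORING_CQE_BUFFER_SHIFT, with IORING_CQE_F_BUFFER
	 * set so the application knows a provided buffer was consumed.
	 */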
Jens Axboebcda7ba2020-02-23 16:42:51 -07002217 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2218 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002219 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002220 kfree(kbuf);
2221 return cflags;
2222}
2223
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002224static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2225{
2226 struct io_buffer *kbuf;
2227
2228 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2229 return io_put_kbuf(req, kbuf);
2230}
2231
Jens Axboe4c6e2772020-07-01 11:29:10 -06002232static inline bool io_run_task_work(void)
2233{
Nadav Amitef98eb02021-08-07 17:13:41 -07002234 if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
Jens Axboe4c6e2772020-07-01 11:29:10 -06002235 __set_current_state(TASK_RUNNING);
Nadav Amitef98eb02021-08-07 17:13:41 -07002236 tracehook_notify_signal();
Jens Axboe4c6e2772020-07-01 11:29:10 -06002237 return true;
2238 }
2239
2240 return false;
2241}
2242
Jens Axboedef596e2019-01-09 08:59:42 -07002243/*
2244 * Find and free completed poll iocbs
2245 */
2246static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
Jens Axboe3c30ef02021-07-23 11:49:29 -06002247 struct list_head *done, bool resubmit)
Jens Axboedef596e2019-01-09 08:59:42 -07002248{
Jens Axboe8237e042019-12-28 10:48:22 -07002249 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002250 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002251
2252 /* order with ->result store in io_complete_rw_iopoll() */
2253 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002254
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002255 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002256 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002257 int cflags = 0;
2258
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002259 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002260 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002261
Jens Axboe3c30ef02021-07-23 11:49:29 -06002262 if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
Pavel Begunkov8c130822021-03-22 01:58:32 +00002263 !(req->flags & REQ_F_DONT_REISSUE)) {
Pavel Begunkovf1613402021-02-11 18:28:21 +00002264 req->iopoll_completed = 0;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002265 req_ref_get(req);
Jens Axboe773af692021-07-27 10:25:55 -06002266 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00002267 continue;
Pavel Begunkovf1613402021-02-11 18:28:21 +00002268 }
2269
Jens Axboebcda7ba2020-02-23 16:42:51 -07002270 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002271 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002272
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002273 __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002274 (*nr_events)++;
2275
Jens Axboede9b4cc2021-02-24 13:28:27 -07002276 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002277 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002278 }
Jens Axboedef596e2019-01-09 08:59:42 -07002279
Jens Axboe09bb8392019-03-13 12:39:28 -06002280 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002281 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002282 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002283}
2284
Jens Axboedef596e2019-01-09 08:59:42 -07002285static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
Jens Axboe3c30ef02021-07-23 11:49:29 -06002286 long min, bool resubmit)
Jens Axboedef596e2019-01-09 08:59:42 -07002287{
2288 struct io_kiocb *req, *tmp;
2289 LIST_HEAD(done);
2290 bool spin;
Jens Axboedef596e2019-01-09 08:59:42 -07002291
2292 /*
2293 * Only spin for completions if we don't have multiple devices hanging
2294 * off our complete list, and we're under the requested amount.
2295 */
Hao Xu915b3dd2021-06-28 05:37:30 +08002296 spin = !ctx->poll_multi_queue && *nr_events < min;
Jens Axboedef596e2019-01-09 08:59:42 -07002297
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002298 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002299 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkova2416e12021-08-09 13:04:09 +01002300 int ret;
Jens Axboedef596e2019-01-09 08:59:42 -07002301
2302 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002303 * Move completed and retryable entries to our local lists.
2304 * If we find a request that requires polling, break out
2305 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002306 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002307 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002308 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002309 continue;
2310 }
2311 if (!list_empty(&done))
2312 break;
2313
2314 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
Pavel Begunkova2416e12021-08-09 13:04:09 +01002315 if (unlikely(ret < 0))
2316 return ret;
2317 else if (ret)
2318 spin = false;
Jens Axboedef596e2019-01-09 08:59:42 -07002319
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002320 /* iopoll may have completed current req */
2321 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002322 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002323 }
2324
2325 if (!list_empty(&done))
Jens Axboe3c30ef02021-07-23 11:49:29 -06002326 io_iopoll_complete(ctx, nr_events, &done, resubmit);
Jens Axboedef596e2019-01-09 08:59:42 -07002327
Pavel Begunkova2416e12021-08-09 13:04:09 +01002328 return 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002329}
2330
2331/*
Jens Axboedef596e2019-01-09 08:59:42 -07002332 * We can't just wait for polled events to come to us, we have to actively
2333 * find and complete them.
2334 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002335static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002336{
2337 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2338 return;
2339
2340 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002341 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002342 unsigned int nr_events = 0;
2343
Jens Axboe3c30ef02021-07-23 11:49:29 -06002344 io_do_iopoll(ctx, &nr_events, 0, false);
Jens Axboe08f54392019-08-21 22:19:11 -06002345
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002346 /* let it sleep and repeat later if can't complete a request */
2347 if (nr_events == 0)
2348 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002349 /*
 2350 * Ensure we allow local-to-the-cpu processing to take place;
 2351 * in that case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002352 * Also let task_work, etc. progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002353 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002354 if (need_resched()) {
2355 mutex_unlock(&ctx->uring_lock);
2356 cond_resched();
2357 mutex_lock(&ctx->uring_lock);
2358 }
Jens Axboedef596e2019-01-09 08:59:42 -07002359 }
2360 mutex_unlock(&ctx->uring_lock);
2361}
2362
Pavel Begunkov7668b922020-07-07 16:36:21 +03002363static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002364{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002365 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002366 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002367
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002368 /*
2369 * We disallow the app entering submit/complete with polling, but we
2370 * still need to lock the ring to prevent racing with polled issue
2371 * that got punted to a workqueue.
2372 */
2373 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002374 /*
2375 * Don't enter poll loop if we already have events pending.
2376 * If we do, we can potentially be spinning for commands that
2377 * already triggered a CQE (eg in error).
2378 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01002379 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002380 __io_cqring_overflow_flush(ctx, false);
2381 if (io_cqring_events(ctx))
2382 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002383 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002384 /*
2385 * If a submit got punted to a workqueue, we can have the
2386 * application entering polling for a command before it gets
2387 * issued. That app will hold the uring_lock for the duration
2388 * of the poll right here, so we need to take a breather every
2389 * now and then to ensure that the issue has a chance to add
2390 * the poll to the issued list. Otherwise we can spin here
2391 * forever, while the workqueue is stuck trying to acquire the
2392 * very same mutex.
2393 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002394 if (list_empty(&ctx->iopoll_list)) {
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002395 u32 tail = ctx->cached_cq_tail;
2396
Jens Axboe500f9fb2019-08-19 12:15:59 -06002397 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002398 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002399 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002400
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002401 /* some requests don't go through iopoll_list */
2402 if (tail != ctx->cached_cq_tail ||
2403 list_empty(&ctx->iopoll_list))
Pavel Begunkove9979b32021-04-13 02:58:45 +01002404 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002405 }
Jens Axboe3c30ef02021-07-23 11:49:29 -06002406 ret = io_do_iopoll(ctx, &nr_events, min, true);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002407 } while (!ret && nr_events < min && !need_resched());
2408out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002409 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002410 return ret;
2411}
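/*
 * Illustrative usage sketch: with IORING_SETUP_IOPOLL, completions are not
 * posted from interrupt context, so the application (or the SQPOLL thread)
 * has to poll for them.  From userspace that typically means something
 * roughly like:
 *
 *	io_uring_enter(ring_fd, to_submit, min_complete,
 *		       IORING_ENTER_GETEVENTS, NULL);
 *
 * which, for an IOPOLL ring, ends up in io_iopoll_check() above with
 * min == min_complete.
 */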
2412
Jens Axboe491381ce2019-10-17 09:20:46 -06002413static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002414{
Jens Axboe491381ce2019-10-17 09:20:46 -06002415 /*
2416 * Tell lockdep we inherited freeze protection from submission
2417 * thread.
2418 */
2419 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002420 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002421
Pavel Begunkov1c986792021-03-22 01:58:31 +00002422 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2423 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002424 }
2425}
2426
Jens Axboeb63534c2020-06-04 11:28:00 -06002427#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002428static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002429{
Pavel Begunkovab454432021-03-22 01:58:33 +00002430 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002431
Pavel Begunkovab454432021-03-22 01:58:33 +00002432 if (!rw)
2433 return !io_req_prep_async(req);
2434 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2435 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2436 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002437}
Jens Axboeb63534c2020-06-04 11:28:00 -06002438
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002439static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002440{
Jens Axboe355afae2020-09-02 09:30:31 -06002441 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002442 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002443
Jens Axboe355afae2020-09-02 09:30:31 -06002444 if (!S_ISBLK(mode) && !S_ISREG(mode))
2445 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002446 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2447 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002448 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002449 /*
2450 * If ref is dying, we might be running poll reap from the exit work.
2451 * Don't attempt to reissue from that path, just let it fail with
2452 * -EAGAIN.
2453 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002454 if (percpu_ref_is_dying(&ctx->refs))
2455 return false;
Jens Axboeef046882021-07-27 10:50:31 -06002456 /*
 2457 * Play it safe and assume it's not safe to re-import and reissue if we're
2458 * not in the original thread group (or in task context).
2459 */
2460 if (!same_thread_group(req->task, current) || !in_task())
2461 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002462 return true;
2463}
Jens Axboee82ad482021-04-02 19:45:34 -06002464#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002465static bool io_resubmit_prep(struct io_kiocb *req)
2466{
2467 return false;
2468}
Jens Axboee82ad482021-04-02 19:45:34 -06002469static bool io_rw_should_reissue(struct io_kiocb *req)
2470{
2471 return false;
2472}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002473#endif
2474
Pavel Begunkov9011bf92021-06-30 21:54:03 +01002475static void io_fallback_req_func(struct work_struct *work)
2476{
2477 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
2478 fallback_work.work);
2479 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
2480 struct io_kiocb *req, *tmp;
2481
Pavel Begunkov9cb00732021-08-17 22:36:44 +01002482 percpu_ref_get(&ctx->refs);
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002483 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
2484 req->io_task_work.func(req);
Pavel Begunkov9cb00732021-08-17 22:36:44 +01002485 percpu_ref_put(&ctx->refs);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01002486}
2487
Jens Axboea1d7c392020-06-22 11:09:46 -06002488static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002489 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002490{
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002491 int cflags = 0;
2492
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002493 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2494 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002495 if (res != req->result) {
2496 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2497 io_rw_should_reissue(req)) {
2498 req->flags |= REQ_F_REISSUE;
2499 return;
2500 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002501 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002502 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002503 if (req->flags & REQ_F_BUFFER_SELECTED)
2504 cflags = io_put_rw_kbuf(req);
2505 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002506}
2507
2508static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2509{
Jens Axboe9adbd452019-12-20 08:45:55 -07002510 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002511
Pavel Begunkov889fca72021-02-10 00:03:09 +00002512 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002513}
2514
Jens Axboedef596e2019-01-09 08:59:42 -07002515static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2516{
Jens Axboe9adbd452019-12-20 08:45:55 -07002517 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002518
Jens Axboe491381ce2019-10-17 09:20:46 -06002519 if (kiocb->ki_flags & IOCB_WRITE)
2520 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002521 if (unlikely(res != req->result)) {
Jens Axboea1ff1e32021-04-12 06:40:02 -06002522 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2523 io_resubmit_prep(req))) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002524 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002525 req->flags |= REQ_F_DONT_REISSUE;
2526 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002527 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002528
2529 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002530 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002531 smp_wmb();
2532 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002533}
2534
2535/*
2536 * After the iocb has been issued, it's safe to be found on the poll list.
2537 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002538 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002539 * accessing the kiocb cookie.
2540 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002541static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002542{
2543 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002544 const bool in_async = io_wq_current_is_worker();
2545
2546 /* workqueue context doesn't hold uring_lock, grab it now */
2547 if (unlikely(in_async))
2548 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002549
2550 /*
2551 * Track whether we have multiple files in our lists. This will impact
2552 * how we do polling eventually, not spinning if we're on potentially
2553 * different devices.
2554 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002555 if (list_empty(&ctx->iopoll_list)) {
Hao Xu915b3dd2021-06-28 05:37:30 +08002556 ctx->poll_multi_queue = false;
2557 } else if (!ctx->poll_multi_queue) {
Jens Axboedef596e2019-01-09 08:59:42 -07002558 struct io_kiocb *list_req;
Hao Xu915b3dd2021-06-28 05:37:30 +08002559 unsigned int queue_num0, queue_num1;
Jens Axboedef596e2019-01-09 08:59:42 -07002560
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002561 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002562 inflight_entry);
Hao Xu915b3dd2021-06-28 05:37:30 +08002563
2564 if (list_req->file != req->file) {
2565 ctx->poll_multi_queue = true;
2566 } else {
2567 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2568 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2569 if (queue_num0 != queue_num1)
2570 ctx->poll_multi_queue = true;
2571 }
Jens Axboedef596e2019-01-09 08:59:42 -07002572 }
2573
2574 /*
2575 * For fast devices, IO may have already completed. If it has, add
2576 * it to the front so we find it first.
2577 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002578 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002579 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002580 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002581 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002582
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002583 if (unlikely(in_async)) {
2584 /*
 2585 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2586 * in sq thread task context or in io worker task context. If
2587 * current task context is sq thread, we don't need to check
 2588 * whether we should wake up the sq thread.
2589 */
2590 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2591 wq_has_sleeper(&ctx->sq_data->wait))
2592 wake_up(&ctx->sq_data->wait);
2593
2594 mutex_unlock(&ctx->uring_lock);
2595 }
Jens Axboedef596e2019-01-09 08:59:42 -07002596}
2597
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002598static inline void io_state_file_put(struct io_submit_state *state)
2599{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002600 if (state->file_refs) {
2601 fput_many(state->file, state->file_refs);
2602 state->file_refs = 0;
2603 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002604}
2605
2606/*
2607 * Get as many references to a file as we have IOs left in this submission,
2608 * assuming most submissions are for one file, or at least that each file
2609 * has more than one submission.
2610 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002611static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002612{
2613 if (!state)
2614 return fget(fd);
2615
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002616 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002617 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002618 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002619 return state->file;
2620 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002621 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002622 }
2623 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002624 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002625 return NULL;
2626
2627 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002628 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002629 return state->file;
2630}
2631
Jens Axboe4503b762020-06-01 10:00:27 -06002632static bool io_bdev_nowait(struct block_device *bdev)
2633{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002634 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002635}
2636
Jens Axboe2b188cc2019-01-07 10:46:33 -07002637/*
2638 * If we tracked the file through the SCM inflight mechanism, we could support
2639 * any file. For now, just ensure that anything potentially problematic is done
2640 * inline.
2641 */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002642static bool __io_file_supports_nowait(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002643{
2644 umode_t mode = file_inode(file)->i_mode;
2645
Jens Axboe4503b762020-06-01 10:00:27 -06002646 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002647 if (IS_ENABLED(CONFIG_BLOCK) &&
2648 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002649 return true;
2650 return false;
2651 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002652 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002653 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002654 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002655 if (IS_ENABLED(CONFIG_BLOCK) &&
2656 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002657 file->f_op != &io_uring_fops)
2658 return true;
2659 return false;
2660 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002661
Jens Axboec5b85622020-06-09 19:23:05 -06002662 /* any ->read/write should understand O_NONBLOCK */
2663 if (file->f_flags & O_NONBLOCK)
2664 return true;
2665
Jens Axboeaf197f52020-04-28 13:15:06 -06002666 if (!(file->f_mode & FMODE_NOWAIT))
2667 return false;
2668
2669 if (rw == READ)
2670 return file->f_op->read_iter != NULL;
2671
2672 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002673}
2674
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002675static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
Jens Axboe7b29f922021-03-12 08:30:14 -07002676{
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002677 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ))
Jens Axboe7b29f922021-03-12 08:30:14 -07002678 return true;
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002679 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE))
Jens Axboe7b29f922021-03-12 08:30:14 -07002680 return true;
2681
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01002682 return __io_file_supports_nowait(req->file, rw);
Jens Axboe7b29f922021-03-12 08:30:14 -07002683}
2684
Pavel Begunkova88fc402020-09-30 22:57:53 +03002685static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002686{
Jens Axboedef596e2019-01-09 08:59:42 -07002687 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002688 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002689 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002690 unsigned ioprio;
2691 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002692
Pavel Begunkovc97d8a02021-08-09 13:04:04 +01002693 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002694 req->flags |= REQ_F_ISREG;
2695
Jens Axboe2b188cc2019-01-07 10:46:33 -07002696 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002697 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002698 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002699 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002700 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002701 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002702 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2703 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2704 if (unlikely(ret))
2705 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002706
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002707 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2708 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2709 req->flags |= REQ_F_NOWAIT;
2710
Jens Axboe2b188cc2019-01-07 10:46:33 -07002711 ioprio = READ_ONCE(sqe->ioprio);
2712 if (ioprio) {
2713 ret = ioprio_check_cap(ioprio);
2714 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002715 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002716
2717 kiocb->ki_ioprio = ioprio;
2718 } else
2719 kiocb->ki_ioprio = get_current_ioprio();
2720
Jens Axboedef596e2019-01-09 08:59:42 -07002721 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002722 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2723 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002724 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002725
Jens Axboedef596e2019-01-09 08:59:42 -07002726 kiocb->ki_flags |= IOCB_HIPRI;
2727 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002728 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002729 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002730 if (kiocb->ki_flags & IOCB_HIPRI)
2731 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002732 kiocb->ki_complete = io_complete_rw;
2733 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002734
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002735 if (req->opcode == IORING_OP_READ_FIXED ||
2736 req->opcode == IORING_OP_WRITE_FIXED) {
2737 req->imu = NULL;
2738 io_req_set_rsrc_node(req);
2739 }
2740
Jens Axboe3529d8c2019-12-19 18:24:38 -07002741 req->rw.addr = READ_ONCE(sqe->addr);
2742 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002743 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002744 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002745}
2746
2747static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2748{
2749 switch (ret) {
2750 case -EIOCBQUEUED:
2751 break;
2752 case -ERESTARTSYS:
2753 case -ERESTARTNOINTR:
2754 case -ERESTARTNOHAND:
2755 case -ERESTART_RESTARTBLOCK:
2756 /*
2757 * We can't just restart the syscall, since previously
2758 * submitted sqes may already be in progress. Just fail this
2759 * IO with EINTR.
2760 */
2761 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002762 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002763 default:
2764 kiocb->ki_complete(kiocb, ret, 0);
2765 }
2766}
2767
Jens Axboea1d7c392020-06-22 11:09:46 -06002768static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002769 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002770{
Jens Axboeba042912019-12-25 16:33:42 -07002771 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002772 struct io_async_rw *io = req->async_data;
Pavel Begunkov97284632021-04-08 19:28:03 +01002773 bool check_reissue = kiocb->ki_complete == io_complete_rw;
Jens Axboeba042912019-12-25 16:33:42 -07002774
Jens Axboe227c0c92020-08-13 11:51:40 -06002775 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002776 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002777 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002778 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002779 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002780 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002781 }
2782
Jens Axboeba042912019-12-25 16:33:42 -07002783 if (req->flags & REQ_F_CUR_POS)
2784 req->file->f_pos = kiocb->ki_pos;
Hao Xue149bd742021-06-28 05:48:05 +08002785 if (ret >= 0 && check_reissue)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002786 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002787 else
2788 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002789
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01002790 if (check_reissue && (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov97284632021-04-08 19:28:03 +01002791 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06002792 if (io_resubmit_prep(req)) {
Pavel Begunkov8c130822021-03-22 01:58:32 +00002793 req_ref_get(req);
Jens Axboe773af692021-07-27 10:25:55 -06002794 io_req_task_queue_reissue(req);
Pavel Begunkov8c130822021-03-22 01:58:32 +00002795 } else {
Pavel Begunkov97284632021-04-08 19:28:03 +01002796 int cflags = 0;
2797
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002798 req_set_fail(req);
Pavel Begunkov97284632021-04-08 19:28:03 +01002799 if (req->flags & REQ_F_BUFFER_SELECTED)
2800 cflags = io_put_rw_kbuf(req);
2801 __io_req_complete(req, issue_flags, ret, cflags);
2802 }
2803 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002804}
2805
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002806static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2807 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07002808{
Jens Axboe9adbd452019-12-20 08:45:55 -07002809 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01002810 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002811 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07002812
Pavel Begunkov75769e32021-04-01 15:43:54 +01002813 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07002814 return -EFAULT;
2815 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01002816 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07002817 return -EFAULT;
2818
2819 /*
2820 * May not be a start of buffer, set size appropriately
2821 * and advance us to the beginning.
2822 */
2823 offset = buf_addr - imu->ubuf;
2824 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002825
2826 if (offset) {
2827 /*
2828 * Don't use iov_iter_advance() here, as it's really slow for
2829 * using the latter parts of a big fixed buffer - it iterates
2830 * over each segment manually. We can cheat a bit here, because
2831 * we know that:
2832 *
2833 * 1) it's a BVEC iter, we set it up
2834 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2835 * first and last bvec
2836 *
2837 * So just find our index, and adjust the iterator afterwards.
2838 * If the offset is within the first bvec (or the whole first
 2839 * bvec), just use iov_iter_advance(). This makes it easier
2840 * since we can just skip the first segment, which may not
2841 * be PAGE_SIZE aligned.
2842 */
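		/*
		 * Worked example (illustrative, assuming 4KiB pages and a
		 * first bvec covering a whole page): for offset ==
		 * 2 * PAGE_SIZE + 512 we subtract bvec->bv_len (one page),
		 * leaving PAGE_SIZE + 512, so seg_skip == 1 + 1 == 2.  The
		 * iterator then starts at imu->bvec[2] with iov_offset == 512,
		 * exactly where iov_iter_advance() would have landed, minus
		 * the per-segment walk.
		 */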
2843 const struct bio_vec *bvec = imu->bvec;
2844
2845 if (offset <= bvec->bv_len) {
2846 iov_iter_advance(iter, offset);
2847 } else {
2848 unsigned long seg_skip;
2849
2850 /* skip first vec */
2851 offset -= bvec->bv_len;
2852 seg_skip = 1 + (offset >> PAGE_SHIFT);
2853
2854 iter->bvec = bvec + seg_skip;
2855 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002856 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002857 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002858 }
2859 }
2860
Pavel Begunkov847595d2021-02-04 13:52:06 +00002861 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002862}
2863
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002864static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2865{
2866 struct io_ring_ctx *ctx = req->ctx;
2867 struct io_mapped_ubuf *imu = req->imu;
2868 u16 index, buf_index = req->buf_index;
2869
2870 if (likely(!imu)) {
2871 if (unlikely(buf_index >= ctx->nr_user_bufs))
2872 return -EFAULT;
2873 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2874 imu = READ_ONCE(ctx->user_bufs[index]);
2875 req->imu = imu;
2876 }
2877 return __io_import_fixed(req, rw, iter, imu);
2878}
2879
Jens Axboebcda7ba2020-02-23 16:42:51 -07002880static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2881{
2882 if (needs_lock)
2883 mutex_unlock(&ctx->uring_lock);
2884}
2885
2886static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2887{
2888 /*
2889 * "Normal" inline submissions always hold the uring_lock, since we
2890 * grab it from the system call. Same is true for the SQPOLL offload.
2891 * The only exception is when we've detached the request and issue it
2892 * from an async worker thread, grab the lock for that case.
2893 */
2894 if (needs_lock)
2895 mutex_lock(&ctx->uring_lock);
2896}
2897
2898static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2899 int bgid, struct io_buffer *kbuf,
2900 bool needs_lock)
2901{
2902 struct io_buffer *head;
2903
2904 if (req->flags & REQ_F_BUFFER_SELECTED)
2905 return kbuf;
2906
2907 io_ring_submit_lock(req->ctx, needs_lock);
2908
2909 lockdep_assert_held(&req->ctx->uring_lock);
2910
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002911 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002912 if (head) {
2913 if (!list_empty(&head->list)) {
2914 kbuf = list_last_entry(&head->list, struct io_buffer,
2915 list);
2916 list_del(&kbuf->list);
2917 } else {
2918 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002919 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002920 }
2921 if (*len > kbuf->len)
2922 *len = kbuf->len;
2923 } else {
2924 kbuf = ERR_PTR(-ENOBUFS);
2925 }
2926
2927 io_ring_submit_unlock(req->ctx, needs_lock);
2928
2929 return kbuf;
2930}
2931
Jens Axboe4d954c22020-02-27 07:31:19 -07002932static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2933 bool needs_lock)
2934{
2935 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002936 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002937
2938 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002939 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002940 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2941 if (IS_ERR(kbuf))
2942 return kbuf;
2943 req->rw.addr = (u64) (unsigned long) kbuf;
2944 req->flags |= REQ_F_BUFFER_SELECTED;
2945 return u64_to_user_ptr(kbuf->addr);
2946}
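/*
 * Illustrative userspace-side sketch: a request opts into the buffer
 * selection handled above by flagging the SQE and naming a buffer group,
 * with the buffers themselves registered beforehand via
 * IORING_OP_PROVIDE_BUFFERS.  Roughly (bgid is hypothetical):
 *
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *
 * The kernel then substitutes the chosen buffer for req->rw.addr/len and
 * reports the buffer ID back through cqe->flags (see io_put_kbuf()).
 */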
2947
2948#ifdef CONFIG_COMPAT
2949static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2950 bool needs_lock)
2951{
2952 struct compat_iovec __user *uiov;
2953 compat_ssize_t clen;
2954 void __user *buf;
2955 ssize_t len;
2956
2957 uiov = u64_to_user_ptr(req->rw.addr);
2958 if (!access_ok(uiov, sizeof(*uiov)))
2959 return -EFAULT;
2960 if (__get_user(clen, &uiov->iov_len))
2961 return -EFAULT;
2962 if (clen < 0)
2963 return -EINVAL;
2964
2965 len = clen;
2966 buf = io_rw_buffer_select(req, &len, needs_lock);
2967 if (IS_ERR(buf))
2968 return PTR_ERR(buf);
2969 iov[0].iov_base = buf;
2970 iov[0].iov_len = (compat_size_t) len;
2971 return 0;
2972}
2973#endif
2974
2975static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2976 bool needs_lock)
2977{
2978 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2979 void __user *buf;
2980 ssize_t len;
2981
2982 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2983 return -EFAULT;
2984
2985 len = iov[0].iov_len;
2986 if (len < 0)
2987 return -EINVAL;
2988 buf = io_rw_buffer_select(req, &len, needs_lock);
2989 if (IS_ERR(buf))
2990 return PTR_ERR(buf);
2991 iov[0].iov_base = buf;
2992 iov[0].iov_len = len;
2993 return 0;
2994}
2995
2996static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2997 bool needs_lock)
2998{
Jens Axboedddb3e22020-06-04 11:27:01 -06002999 if (req->flags & REQ_F_BUFFER_SELECTED) {
3000 struct io_buffer *kbuf;
3001
3002 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3003 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3004 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07003005 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06003006 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00003007 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07003008 return -EINVAL;
3009
3010#ifdef CONFIG_COMPAT
3011 if (req->ctx->compat)
3012 return io_compat_import(req, iov, needs_lock);
3013#endif
3014
3015 return __io_iov_buffer_select(req, iov, needs_lock);
3016}
3017
Pavel Begunkov847595d2021-02-04 13:52:06 +00003018static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
3019 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003020{
Jens Axboe9adbd452019-12-20 08:45:55 -07003021 void __user *buf = u64_to_user_ptr(req->rw.addr);
3022 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003023 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07003024 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07003025
Pavel Begunkov7d009162019-11-25 23:14:40 +03003026 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07003027 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07003028 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07003029 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003030
Jens Axboebcda7ba2020-02-23 16:42:51 -07003031 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07003032 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07003033 return -EINVAL;
3034
Jens Axboe3a6820f2019-12-22 15:19:35 -07003035 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07003036 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07003037 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003038 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003039 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003040 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003041 }
3042
Jens Axboe3a6820f2019-12-22 15:19:35 -07003043 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3044 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003045 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003046 }
3047
Jens Axboe4d954c22020-02-27 07:31:19 -07003048 if (req->flags & REQ_F_BUFFER_SELECT) {
3049 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003050 if (!ret)
3051 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003052 *iovec = NULL;
3053 return ret;
3054 }
3055
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003056 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3057 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003058}
3059
Jens Axboe0fef9482020-08-26 10:36:20 -06003060static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3061{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003062 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003063}
3064
Jens Axboe32960612019-09-23 11:05:34 -06003065/*
3066 * For files that don't have ->read_iter() and ->write_iter(), handle them
3067 * by looping over ->read() or ->write() manually.
3068 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003069static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003070{
Jens Axboe4017eb92020-10-22 14:14:12 -06003071 struct kiocb *kiocb = &req->rw.kiocb;
3072 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003073 ssize_t ret = 0;
3074
3075 /*
3076 * Don't support polled IO through this interface, and we can't
3077 * support non-blocking either. For the latter, this just causes
3078 * the kiocb to be handled from an async context.
3079 */
3080 if (kiocb->ki_flags & IOCB_HIPRI)
3081 return -EOPNOTSUPP;
3082 if (kiocb->ki_flags & IOCB_NOWAIT)
3083 return -EAGAIN;
3084
3085 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003086 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003087 ssize_t nr;
3088
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003089 if (!iov_iter_is_bvec(iter)) {
3090 iovec = iov_iter_iovec(iter);
3091 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003092 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3093 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003094 }
3095
Jens Axboe32960612019-09-23 11:05:34 -06003096 if (rw == READ) {
3097 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003098 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003099 } else {
3100 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003101 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003102 }
3103
3104 if (nr < 0) {
3105 if (!ret)
3106 ret = nr;
3107 break;
3108 }
3109 ret += nr;
3110 if (nr != iovec.iov_len)
3111 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003112 req->rw.len -= nr;
3113 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003114 iov_iter_advance(iter, nr);
3115 }
3116
3117 return ret;
3118}
3119
Jens Axboeff6165b2020-08-13 09:47:43 -06003120static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3121 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003122{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003123 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003124
Jens Axboeff6165b2020-08-13 09:47:43 -06003125 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003126 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003127 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003128 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003129 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003130 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003131 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003132 unsigned iov_off = 0;
3133
3134 rw->iter.iov = rw->fast_iov;
3135 if (iter->iov != fast_iov) {
3136 iov_off = iter->iov - fast_iov;
3137 rw->iter.iov += iov_off;
3138 }
3139 if (rw->fast_iov != fast_iov)
3140 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003141 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003142 } else {
3143 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003144 }
3145}
3146
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003147static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003148{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003149 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3150 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3151 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003152}
3153
Jens Axboeff6165b2020-08-13 09:47:43 -06003154static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3155 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003156 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003157{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003158 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003159 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003160 if (!req->async_data) {
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003161 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003162 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003163 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003164 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003165
Jens Axboeff6165b2020-08-13 09:47:43 -06003166 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003167 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003168 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003169}
3170
Pavel Begunkov73debe62020-09-30 22:57:54 +03003171static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003172{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003173 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003174 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003175 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003176
Pavel Begunkov2846c482020-11-07 13:16:27 +00003177 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003178 if (unlikely(ret < 0))
3179 return ret;
3180
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003181 iorw->bytes_done = 0;
3182 iorw->free_iovec = iov;
3183 if (iov)
3184 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003185 return 0;
3186}
3187
Pavel Begunkov73debe62020-09-30 22:57:54 +03003188static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003189{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003190 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3191 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003192 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003193}
3194
Jens Axboec1dd91d2020-08-03 16:43:59 -06003195/*
3196 * This is our waitqueue callback handler, registered through lock_page_async()
 3197 * when we initially tried to do the IO with the iocb and armed our waitqueue.
3198 * This gets called when the page is unlocked, and we generally expect that to
3199 * happen when the page IO is completed and the page is now uptodate. This will
3200 * queue a task_work based retry of the operation, attempting to copy the data
3201 * again. If the latter fails because the page was NOT uptodate, then we will
3202 * do a thread based blocking retry of the operation. That's the unexpected
3203 * slow path.
3204 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003205static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3206 int sync, void *arg)
3207{
3208 struct wait_page_queue *wpq;
3209 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003210 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003211
3212 wpq = container_of(wait, struct wait_page_queue, wait);
3213
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003214 if (!wake_page_match(wpq, key))
3215 return 0;
3216
Hao Xuc8d317a2020-09-29 20:00:45 +08003217 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003218 list_del_init(&wait->entry);
3219
Jens Axboebcf5a062020-05-22 09:24:42 -06003220 /* submit ref gets dropped, acquire a new one */
Jens Axboede9b4cc2021-02-24 13:28:27 -07003221 req_ref_get(req);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003222 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003223 return 1;
3224}
3225
Jens Axboec1dd91d2020-08-03 16:43:59 -06003226/*
3227 * This controls whether a given IO request should be armed for async page
3228 * based retry. If we return false here, the request is handed to the async
3229 * worker threads for retry. If we're doing buffered reads on a regular file,
3230 * we prepare a private wait_page_queue entry and retry the operation. This
3231 * will either succeed because the page is now uptodate and unlocked, or it
3232 * will register a callback when the page is unlocked at IO completion. Through
3233 * that callback, io_uring uses task_work to setup a retry of the operation.
3234 * That retry will attempt the buffered read again. The retry will generally
3235 * succeed, or in rare cases where it fails, we then fall back to using the
3236 * async worker threads for a blocking retry.
3237 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003238static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003239{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003240 struct io_async_rw *rw = req->async_data;
3241 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003242 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003243
3244 /* never retry for NOWAIT, we just complete with -EAGAIN */
3245 if (req->flags & REQ_F_NOWAIT)
3246 return false;
3247
Jens Axboe227c0c92020-08-13 11:51:40 -06003248 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003249 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003250 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003251
Jens Axboebcf5a062020-05-22 09:24:42 -06003252 /*
3253 * just use poll if we can, and don't attempt if the fs doesn't
3254 * support callback based unlocks
3255 */
3256 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3257 return false;
3258
Jens Axboe3b2a4432020-08-16 10:58:43 -07003259 wait->wait.func = io_async_buf_func;
3260 wait->wait.private = req;
3261 wait->wait.flags = 0;
3262 INIT_LIST_HEAD(&wait->wait.entry);
3263 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003264 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003265 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003266 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003267}
3268
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003269static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003270{
3271 if (req->file->f_op->read_iter)
3272 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003273 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003274 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003275 else
3276 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003277}
3278
Pavel Begunkov889fca72021-02-10 00:03:09 +00003279static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003280{
3281 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003282 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003283 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003284 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003285 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003286 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003287
Pavel Begunkov2846c482020-11-07 13:16:27 +00003288 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003289 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003290 iovec = NULL;
3291 } else {
3292 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3293 if (ret < 0)
3294 return ret;
3295 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003296 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003297 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003298
Jens Axboefd6c2e42019-12-18 12:19:41 -07003299 /* Ensure we clear previously set non-block flag */
3300 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003301 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003302 else
3303 kiocb->ki_flags |= IOCB_NOWAIT;
3304
Pavel Begunkov24c74672020-06-21 13:09:51 +03003305 /* If the file doesn't support async, just async punt */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003306 if (force_nonblock && !io_file_supports_nowait(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003307 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003308 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003309 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003310
Pavel Begunkov632546c2020-11-07 13:16:26 +00003311 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003312 if (unlikely(ret)) {
3313 kfree(iovec);
3314 return ret;
3315 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003316
Jens Axboe227c0c92020-08-13 11:51:40 -06003317 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003318
Jens Axboe230d50d2021-04-01 20:41:15 -06003319 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003320 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003321 /* IOPOLL retry should happen for io-wq threads */
3322 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003323 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003324 /* no retry on NONBLOCK nor RWF_NOWAIT */
3325 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003326 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003327 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003328 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003329 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003330 } else if (ret == -EIOCBQUEUED) {
3331 goto out_free;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003332 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003333 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003334 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003335 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003336 }
3337
Jens Axboe227c0c92020-08-13 11:51:40 -06003338 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003339 if (ret2)
3340 return ret2;
3341
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003342 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003343 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003344 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003345 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003346
Pavel Begunkovb23df912021-02-04 13:52:04 +00003347 do {
3348 io_size -= ret;
3349 rw->bytes_done += ret;
3350 /* if we can retry, do so with the callbacks armed */
3351 if (!io_rw_should_retry(req)) {
3352 kiocb->ki_flags &= ~IOCB_WAITQ;
3353 return -EAGAIN;
3354 }
3355
3356 /*
3357 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3358 * we get -EIOCBQUEUED, then we'll get a notification when the
3359 * desired page gets unlocked. We can also get a partial read
3360 * here, and if we do, then just retry at the new offset.
3361 */
3362 ret = io_iter_do_read(req, iter);
3363 if (ret == -EIOCBQUEUED)
3364 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003365 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003366 kiocb->ki_flags &= ~IOCB_WAITQ;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003367 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003368done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003369 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003370out_free:
 3371 /* it's faster to check here than to delegate to kfree */
3372 if (iovec)
3373 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003374 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003375}
3376
Pavel Begunkov73debe62020-09-30 22:57:54 +03003377static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003378{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003379 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3380 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003381 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003382}
3383
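/*
 * Issue-side write handler. A note on the copy_iov fallback below: when a
 * nonblocking attempt cannot complete (-EAGAIN, or the file/IO type cannot
 * honour IOCB_NOWAIT), any bytes the iterator may already have consumed are
 * reverted and the iovec/iter state is copied into req->async_data via
 * io_setup_async_rw(), so the request can be retried later from a context
 * that is allowed to block.
 */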
Pavel Begunkov889fca72021-02-10 00:03:09 +00003384static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003385{
3386 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003387 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003388 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003389 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003390 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003391 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003392
Pavel Begunkov2846c482020-11-07 13:16:27 +00003393 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003394 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003395 iovec = NULL;
3396 } else {
3397 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3398 if (ret < 0)
3399 return ret;
3400 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003401 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003402 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003403
Jens Axboefd6c2e42019-12-18 12:19:41 -07003404 /* Ensure we clear previously set non-block flag */
3405 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003406 kiocb->ki_flags &= ~IOCB_NOWAIT;
3407 else
3408 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003409
Pavel Begunkov24c74672020-06-21 13:09:51 +03003410 /* If the file doesn't support async, just async punt */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01003411 if (force_nonblock && !io_file_supports_nowait(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003412 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003413
Jens Axboe10d59342019-12-09 20:16:22 -07003414	/* the file path doesn't support NOWAIT for non-direct IO */
3415 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3416 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003417 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003418
Pavel Begunkov632546c2020-11-07 13:16:26 +00003419 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003420 if (unlikely(ret))
3421 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003422
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003423 /*
3424 * Open-code file_start_write here to grab freeze protection,
3425 * which will be released by another thread in
3426 * io_complete_rw(). Fool lockdep by telling it the lock got
3427 * released so that it doesn't complain about the held lock when
3428 * we return to userspace.
3429 */
3430 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003431 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003432 __sb_writers_release(file_inode(req->file)->i_sb,
3433 SB_FREEZE_WRITE);
3434 }
3435 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003436
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003437 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003438 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003439 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003440 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003441 else
3442 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003443
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003444 if (req->flags & REQ_F_REISSUE) {
3445 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003446 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003447 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003448
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003449 /*
3450 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3451 * retry them without IOCB_NOWAIT.
3452 */
3453 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3454 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003455 /* no retry on NONBLOCK nor RWF_NOWAIT */
3456 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003457 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003458 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003459 /* IOPOLL retry should happen for io-wq threads */
3460 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3461 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003462done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003463 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003464 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003465copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003466 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003467 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003468 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003469 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003470 }
Jens Axboe31b51512019-01-18 22:56:34 -07003471out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003472 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003473 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003474 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003475 return ret;
3476}
3477
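/*
 * The opcode handlers below follow a common two-step pattern: the ->prep()
 * helper runs at submission time and only decodes and validates the SQE
 * into the per-request data, while the issue handler does the actual work.
 * Handlers that can only run synchronously (rename, unlink, shutdown, ...)
 * return -EAGAIN when invoked with IO_URING_F_NONBLOCK, which makes the
 * core punt the request to the io-wq worker pool where blocking is allowed.
 */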
Jens Axboe80a261f2020-09-28 14:23:58 -06003478static int io_renameat_prep(struct io_kiocb *req,
3479 const struct io_uring_sqe *sqe)
3480{
3481 struct io_rename *ren = &req->rename;
3482 const char __user *oldf, *newf;
3483
Jens Axboeed7eb252021-06-23 09:04:13 -06003484 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3485 return -EINVAL;
3486 if (sqe->ioprio || sqe->buf_index)
3487 return -EINVAL;
Jens Axboe80a261f2020-09-28 14:23:58 -06003488 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3489 return -EBADF;
3490
3491 ren->old_dfd = READ_ONCE(sqe->fd);
3492 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3493 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3494 ren->new_dfd = READ_ONCE(sqe->len);
3495 ren->flags = READ_ONCE(sqe->rename_flags);
3496
3497 ren->oldpath = getname(oldf);
3498 if (IS_ERR(ren->oldpath))
3499 return PTR_ERR(ren->oldpath);
3500
3501 ren->newpath = getname(newf);
3502 if (IS_ERR(ren->newpath)) {
3503 putname(ren->oldpath);
3504 return PTR_ERR(ren->newpath);
3505 }
3506
3507 req->flags |= REQ_F_NEED_CLEANUP;
3508 return 0;
3509}
3510
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003511static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003512{
3513 struct io_rename *ren = &req->rename;
3514 int ret;
3515
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003516 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003517 return -EAGAIN;
3518
3519 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3520 ren->newpath, ren->flags);
3521
3522 req->flags &= ~REQ_F_NEED_CLEANUP;
3523 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003524 req_set_fail(req);
Jens Axboe80a261f2020-09-28 14:23:58 -06003525 io_req_complete(req, ret);
3526 return 0;
3527}
3528
Jens Axboe14a11432020-09-28 14:27:37 -06003529static int io_unlinkat_prep(struct io_kiocb *req,
3530 const struct io_uring_sqe *sqe)
3531{
3532 struct io_unlink *un = &req->unlink;
3533 const char __user *fname;
3534
Jens Axboe22634bc2021-06-23 09:07:45 -06003535 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3536 return -EINVAL;
3537 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3538 return -EINVAL;
Jens Axboe14a11432020-09-28 14:27:37 -06003539 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3540 return -EBADF;
3541
3542 un->dfd = READ_ONCE(sqe->fd);
3543
3544 un->flags = READ_ONCE(sqe->unlink_flags);
3545 if (un->flags & ~AT_REMOVEDIR)
3546 return -EINVAL;
3547
3548 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3549 un->filename = getname(fname);
3550 if (IS_ERR(un->filename))
3551 return PTR_ERR(un->filename);
3552
3553 req->flags |= REQ_F_NEED_CLEANUP;
3554 return 0;
3555}
3556
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003557static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003558{
3559 struct io_unlink *un = &req->unlink;
3560 int ret;
3561
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003562 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003563 return -EAGAIN;
3564
3565 if (un->flags & AT_REMOVEDIR)
3566 ret = do_rmdir(un->dfd, un->filename);
3567 else
3568 ret = do_unlinkat(un->dfd, un->filename);
3569
3570 req->flags &= ~REQ_F_NEED_CLEANUP;
3571 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003572 req_set_fail(req);
Jens Axboe14a11432020-09-28 14:27:37 -06003573 io_req_complete(req, ret);
3574 return 0;
3575}
3576
Jens Axboe36f4fa62020-09-05 11:14:22 -06003577static int io_shutdown_prep(struct io_kiocb *req,
3578 const struct io_uring_sqe *sqe)
3579{
3580#if defined(CONFIG_NET)
3581 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3582 return -EINVAL;
3583 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3584 sqe->buf_index)
3585 return -EINVAL;
3586
3587 req->shutdown.how = READ_ONCE(sqe->len);
3588 return 0;
3589#else
3590 return -EOPNOTSUPP;
3591#endif
3592}
3593
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003594static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003595{
3596#if defined(CONFIG_NET)
3597 struct socket *sock;
3598 int ret;
3599
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003600 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003601 return -EAGAIN;
3602
Linus Torvalds48aba792020-12-16 12:44:05 -08003603 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003604 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003605 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003606
3607 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003608 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003609 req_set_fail(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003610 io_req_complete(req, ret);
3611 return 0;
3612#else
3613 return -EOPNOTSUPP;
3614#endif
3615}
3616
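/*
 * Common prep for splice and tee: both resolve the input file here (it may
 * be a fixed/registered file, hence io_file_get() with SPLICE_F_FD_IN_FIXED)
 * and mark the request with REQ_F_NEED_CLEANUP so the core knows there is
 * per-request state to release even if the request never gets issued.
 */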
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003617static int __io_splice_prep(struct io_kiocb *req,
3618 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003619{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01003620 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003621 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003622
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003623 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3624 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003625
3626 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003627 sp->len = READ_ONCE(sqe->len);
3628 sp->flags = READ_ONCE(sqe->splice_flags);
3629
3630 if (unlikely(sp->flags & ~valid_flags))
3631 return -EINVAL;
3632
Pavel Begunkovac177052021-08-09 13:04:02 +01003633 sp->file_in = io_file_get(req->ctx, NULL, req,
3634 READ_ONCE(sqe->splice_fd_in),
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003635 (sp->flags & SPLICE_F_FD_IN_FIXED));
3636 if (!sp->file_in)
3637 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003638 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003639 return 0;
3640}
3641
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003642static int io_tee_prep(struct io_kiocb *req,
3643 const struct io_uring_sqe *sqe)
3644{
3645 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3646 return -EINVAL;
3647 return __io_splice_prep(req, sqe);
3648}
3649
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003650static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003651{
3652 struct io_splice *sp = &req->splice;
3653 struct file *in = sp->file_in;
3654 struct file *out = sp->file_out;
3655 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3656 long ret = 0;
3657
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003658 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003659 return -EAGAIN;
3660 if (sp->len)
3661 ret = do_tee(in, out, sp->len, flags);
3662
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003663 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3664 io_put_file(in);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003665 req->flags &= ~REQ_F_NEED_CLEANUP;
3666
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003667 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003668 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003669 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003670 return 0;
3671}
3672
3673static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3674{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01003675 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003676
3677 sp->off_in = READ_ONCE(sqe->splice_off_in);
3678 sp->off_out = READ_ONCE(sqe->off);
3679 return __io_splice_prep(req, sqe);
3680}
3681
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003682static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003683{
3684 struct io_splice *sp = &req->splice;
3685 struct file *in = sp->file_in;
3686 struct file *out = sp->file_out;
3687 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3688 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003689 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003690
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003691 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003692 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003693
3694 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3695 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003696
Jens Axboe948a7742020-05-17 14:21:38 -06003697 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003698 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003699
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003700 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3701 io_put_file(in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003702 req->flags &= ~REQ_F_NEED_CLEANUP;
3703
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003704 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003705 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003706 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003707 return 0;
3708}
3709
Jens Axboe2b188cc2019-01-07 10:46:33 -07003710/*
3711 * IORING_OP_NOP just posts a completion event, nothing else.
3712 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003713static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003714{
3715 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003716
Jens Axboedef596e2019-01-09 08:59:42 -07003717 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3718 return -EINVAL;
3719
Pavel Begunkov889fca72021-02-10 00:03:09 +00003720 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003721 return 0;
3722}
3723
Pavel Begunkov1155c762021-02-18 18:29:38 +00003724static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003725{
Jens Axboe6b063142019-01-10 22:13:58 -07003726 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003727
Jens Axboe09bb8392019-03-13 12:39:28 -06003728 if (!req->file)
3729 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003730
Jens Axboe6b063142019-01-10 22:13:58 -07003731 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003732 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003733 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003734 return -EINVAL;
3735
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003736 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3737 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3738 return -EINVAL;
3739
3740 req->sync.off = READ_ONCE(sqe->off);
3741 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003742 return 0;
3743}
3744
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003745static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003746{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003747 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003748 int ret;
3749
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003750 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003751 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003752 return -EAGAIN;
3753
Jens Axboe9adbd452019-12-20 08:45:55 -07003754 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003755 end > 0 ? end : LLONG_MAX,
3756 req->sync.flags & IORING_FSYNC_DATASYNC);
3757 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003758 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003759 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003760 return 0;
3761}
3762
Jens Axboed63d1b52019-12-10 10:38:56 -07003763static int io_fallocate_prep(struct io_kiocb *req,
3764 const struct io_uring_sqe *sqe)
3765{
3766 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3767 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003768 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3769 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003770
3771 req->sync.off = READ_ONCE(sqe->off);
3772 req->sync.len = READ_ONCE(sqe->addr);
3773 req->sync.mode = READ_ONCE(sqe->len);
3774 return 0;
3775}
3776
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003777static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003778{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003779 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003780
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003781	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003782 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003783 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003784 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3785 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003786 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003787 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003788 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003789 return 0;
3790}
3791
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003792static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003793{
Jens Axboef8748882020-01-08 17:47:02 -07003794 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003795 int ret;
3796
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003797 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003798 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003799 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003800 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003801
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003802	/* open.how should already be initialised */
3803 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003804 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003805
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003806 req->open.dfd = READ_ONCE(sqe->fd);
3807 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003808 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003809 if (IS_ERR(req->open.filename)) {
3810 ret = PTR_ERR(req->open.filename);
3811 req->open.filename = NULL;
3812 return ret;
3813 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003814 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003815 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003816 return 0;
3817}
3818
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003819static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3820{
3821 u64 flags, mode;
3822
Jens Axboe14587a462020-09-05 11:36:08 -06003823 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003824 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003825 mode = READ_ONCE(sqe->len);
3826 flags = READ_ONCE(sqe->open_flags);
3827 req->open.how = build_open_how(flags, mode);
3828 return __io_openat_prep(req, sqe);
3829}
3830
Jens Axboecebdb982020-01-08 17:59:24 -07003831static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3832{
3833 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003834 size_t len;
3835 int ret;
3836
Jens Axboe14587a462020-09-05 11:36:08 -06003837 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003838 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003839 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3840 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003841 if (len < OPEN_HOW_SIZE_VER0)
3842 return -EINVAL;
3843
3844 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3845 len);
3846 if (ret)
3847 return ret;
3848
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003849 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003850}
3851
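/*
 * Nonblocking opens are attempted with LOOKUP_CACHED and O_NONBLOCK set, so
 * do_filp_open() only succeeds if everything needed is already cached;
 * otherwise returning -EAGAIN below sends the request to io-wq for a
 * blocking retry, unless the application itself asked for RESOLVE_CACHED,
 * in which case -EAGAIN is posted as the final result.
 */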
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003852static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003853{
3854 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003855 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003856 bool nonblock_set;
3857 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003858 int ret;
3859
Jens Axboecebdb982020-01-08 17:59:24 -07003860 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003861 if (ret)
3862 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003863 nonblock_set = op.open_flag & O_NONBLOCK;
3864 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003865 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003866 /*
3867 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3868		 * it'll always return -EAGAIN
3869 */
3870 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3871 return -EAGAIN;
3872 op.lookup_flags |= LOOKUP_CACHED;
3873 op.open_flag |= O_NONBLOCK;
3874 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003875
Jens Axboe4022e7a2020-03-19 19:23:18 -06003876 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003877 if (ret < 0)
3878 goto err;
3879
3880 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003881 if (IS_ERR(file)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003882 /*
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003883		 * We could hang on to this 'fd' on retrying, but it seems like a
3884		 * marginal gain for something that is now known to be a slower
3885 * path. So just put it, and we'll get a new one when we retry.
Jens Axboe3a81fd02020-12-10 12:25:36 -07003886 */
3887 put_unused_fd(ret);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003888
3889 ret = PTR_ERR(file);
3890 /* only retry if RESOLVE_CACHED wasn't already set by application */
3891 if (ret == -EAGAIN &&
3892 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
3893 return -EAGAIN;
3894 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003895 }
3896
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003897 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3898 file->f_flags &= ~O_NONBLOCK;
3899 fsnotify_open(file);
3900 fd_install(ret, file);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003901err:
3902 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003903 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003904 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003905 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01003906 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003907 return 0;
3908}
3909
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003910static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003911{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003912 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003913}
3914
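/*
 * Provided-buffer management. Each buffer group id (bgid) maps, via the
 * ctx->io_buffers xarray, to a list of io_buffer entries that requests with
 * IOSQE_BUFFER_SELECT can consume. IORING_OP_REMOVE_BUFFERS frees up to
 * 'nbufs' entries from one group; the first buffer doubles as the list head.
 */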
Jens Axboe067524e2020-03-02 16:32:28 -07003915static int io_remove_buffers_prep(struct io_kiocb *req,
3916 const struct io_uring_sqe *sqe)
3917{
3918 struct io_provide_buf *p = &req->pbuf;
3919 u64 tmp;
3920
3921 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3922 return -EINVAL;
3923
3924 tmp = READ_ONCE(sqe->fd);
3925 if (!tmp || tmp > USHRT_MAX)
3926 return -EINVAL;
3927
3928 memset(p, 0, sizeof(*p));
3929 p->nbufs = tmp;
3930 p->bgid = READ_ONCE(sqe->buf_group);
3931 return 0;
3932}
3933
3934static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3935 int bgid, unsigned nbufs)
3936{
3937 unsigned i = 0;
3938
3939 /* shouldn't happen */
3940 if (!nbufs)
3941 return 0;
3942
3943 /* the head kbuf is the list itself */
3944 while (!list_empty(&buf->list)) {
3945 struct io_buffer *nxt;
3946
3947 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3948 list_del(&nxt->list);
3949 kfree(nxt);
3950 if (++i == nbufs)
3951 return i;
3952 }
3953 i++;
3954 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003955 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003956
3957 return i;
3958}
3959
Pavel Begunkov889fca72021-02-10 00:03:09 +00003960static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003961{
3962 struct io_provide_buf *p = &req->pbuf;
3963 struct io_ring_ctx *ctx = req->ctx;
3964 struct io_buffer *head;
3965 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003966 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003967
3968 io_ring_submit_lock(ctx, !force_nonblock);
3969
3970 lockdep_assert_held(&ctx->uring_lock);
3971
3972 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003973 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003974 if (head)
3975 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003976 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003977 req_set_fail(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003978
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00003979 /* complete before unlock, IOPOLL may need the lock */
3980 __io_req_complete(req, issue_flags, ret, 0);
3981 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07003982 return 0;
3983}
3984
Jens Axboeddf0322d2020-02-23 16:41:33 -07003985static int io_provide_buffers_prep(struct io_kiocb *req,
3986 const struct io_uring_sqe *sqe)
3987{
Pavel Begunkov38134ad2021-04-15 13:07:39 +01003988 unsigned long size, tmp_check;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003989 struct io_provide_buf *p = &req->pbuf;
3990 u64 tmp;
3991
3992 if (sqe->ioprio || sqe->rw_flags)
3993 return -EINVAL;
3994
3995 tmp = READ_ONCE(sqe->fd);
3996 if (!tmp || tmp > USHRT_MAX)
3997 return -E2BIG;
3998 p->nbufs = tmp;
3999 p->addr = READ_ONCE(sqe->addr);
4000 p->len = READ_ONCE(sqe->len);
4001
Pavel Begunkov38134ad2021-04-15 13:07:39 +01004002 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
4003 &size))
4004 return -EOVERFLOW;
4005 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4006 return -EOVERFLOW;
4007
Pavel Begunkovd81269f2021-03-19 10:21:19 +00004008 size = (unsigned long)p->len * p->nbufs;
4009 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07004010 return -EFAULT;
4011
4012 p->bgid = READ_ONCE(sqe->buf_group);
4013 tmp = READ_ONCE(sqe->off);
4014 if (tmp > USHRT_MAX)
4015 return -E2BIG;
4016 p->bid = tmp;
4017 return 0;
4018}
4019
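/*
 * Allocate and chain the requested buffers: the first allocation becomes
 * the group's head and the rest are linked on head->list, with buffer ids
 * assigned sequentially starting at pbuf->bid. A partial allocation is not
 * an error; the number of buffers actually added is returned instead.
 */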
4020static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4021{
4022 struct io_buffer *buf;
4023 u64 addr = pbuf->addr;
4024 int i, bid = pbuf->bid;
4025
4026 for (i = 0; i < pbuf->nbufs; i++) {
4027 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
4028 if (!buf)
4029 break;
4030
4031 buf->addr = addr;
Thadeu Lima de Souza Cascardod1f82802021-05-05 09:47:06 -03004032 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004033 buf->bid = bid;
4034 addr += pbuf->len;
4035 bid++;
4036 if (!*head) {
4037 INIT_LIST_HEAD(&buf->list);
4038 *head = buf;
4039 } else {
4040 list_add_tail(&buf->list, &(*head)->list);
4041 }
4042 }
4043
4044 return i ? i : -ENOMEM;
4045}
4046
Pavel Begunkov889fca72021-02-10 00:03:09 +00004047static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004048{
4049 struct io_provide_buf *p = &req->pbuf;
4050 struct io_ring_ctx *ctx = req->ctx;
4051 struct io_buffer *head, *list;
4052 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004053 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004054
4055 io_ring_submit_lock(ctx, !force_nonblock);
4056
4057 lockdep_assert_held(&ctx->uring_lock);
4058
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004059 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004060
4061 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004062 if (ret >= 0 && !list) {
4063 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4064 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004065 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004066 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004067 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004068 req_set_fail(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004069 /* complete before unlock, IOPOLL may need the lock */
4070 __io_req_complete(req, issue_flags, ret, 0);
4071 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004072 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004073}
4074
Jens Axboe3e4827b2020-01-08 15:18:09 -07004075static int io_epoll_ctl_prep(struct io_kiocb *req,
4076 const struct io_uring_sqe *sqe)
4077{
4078#if defined(CONFIG_EPOLL)
4079 if (sqe->ioprio || sqe->buf_index)
4080 return -EINVAL;
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004081 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004082 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004083
4084 req->epoll.epfd = READ_ONCE(sqe->fd);
4085 req->epoll.op = READ_ONCE(sqe->len);
4086 req->epoll.fd = READ_ONCE(sqe->off);
4087
4088 if (ep_op_has_event(req->epoll.op)) {
4089 struct epoll_event __user *ev;
4090
4091 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4092 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4093 return -EFAULT;
4094 }
4095
4096 return 0;
4097#else
4098 return -EOPNOTSUPP;
4099#endif
4100}
4101
Pavel Begunkov889fca72021-02-10 00:03:09 +00004102static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004103{
4104#if defined(CONFIG_EPOLL)
4105 struct io_epoll *ie = &req->epoll;
4106 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004107 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004108
4109 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4110 if (force_nonblock && ret == -EAGAIN)
4111 return -EAGAIN;
4112
4113 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004114 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004115 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004116 return 0;
4117#else
4118 return -EOPNOTSUPP;
4119#endif
4120}
4121
Jens Axboec1ca7572019-12-25 22:18:28 -07004122static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4123{
4124#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4125 if (sqe->ioprio || sqe->buf_index || sqe->off)
4126 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004127 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4128 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004129
4130 req->madvise.addr = READ_ONCE(sqe->addr);
4131 req->madvise.len = READ_ONCE(sqe->len);
4132 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4133 return 0;
4134#else
4135 return -EOPNOTSUPP;
4136#endif
4137}
4138
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004139static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004140{
4141#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4142 struct io_madvise *ma = &req->madvise;
4143 int ret;
4144
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004145 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004146 return -EAGAIN;
4147
Minchan Kim0726b012020-10-17 16:14:50 -07004148 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004149 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004150 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004151 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004152 return 0;
4153#else
4154 return -EOPNOTSUPP;
4155#endif
4156}
4157
Jens Axboe4840e412019-12-25 22:03:45 -07004158static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4159{
4160 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4161 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004162 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4163 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004164
4165 req->fadvise.offset = READ_ONCE(sqe->off);
4166 req->fadvise.len = READ_ONCE(sqe->len);
4167 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4168 return 0;
4169}
4170
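/*
 * Only advice values that never block (NORMAL, RANDOM, SEQUENTIAL) are
 * handled inline in the nonblocking path; anything else (e.g. DONTNEED,
 * which may write back and invalidate pagecache) is punted to io-wq by
 * returning -EAGAIN.
 */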
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004171static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004172{
4173 struct io_fadvise *fa = &req->fadvise;
4174 int ret;
4175
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004176 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004177 switch (fa->advice) {
4178 case POSIX_FADV_NORMAL:
4179 case POSIX_FADV_RANDOM:
4180 case POSIX_FADV_SEQUENTIAL:
4181 break;
4182 default:
4183 return -EAGAIN;
4184 }
4185 }
Jens Axboe4840e412019-12-25 22:03:45 -07004186
4187 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4188 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004189 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004190 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004191 return 0;
4192}
4193
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004194static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4195{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004196 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004197 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004198 if (sqe->ioprio || sqe->buf_index)
4199 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004200 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004201 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004202
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004203 req->statx.dfd = READ_ONCE(sqe->fd);
4204 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004205 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004206 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4207 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004208
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004209 return 0;
4210}
4211
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004212static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004213{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004214 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004215 int ret;
4216
Pavel Begunkov59d70012021-03-22 01:58:30 +00004217 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004218 return -EAGAIN;
4219
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004220 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4221 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004222
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004223 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004224 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004225 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004226 return 0;
4227}
4228
Jens Axboeb5dba592019-12-11 14:02:38 -07004229static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4230{
Jens Axboe14587a462020-09-05 11:36:08 -06004231 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004232 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004233 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4234 sqe->rw_flags || sqe->buf_index)
4235 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004236 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004237 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004238
4239 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004240 return 0;
4241}
4242
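/*
 * Close is done by hand rather than via close_fd() so the file can be
 * inspected first: closing the io_uring fd itself is refused, and if the
 * file has a ->flush() op the final close is punted to io-wq, since flush
 * may block.
 */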
Pavel Begunkov889fca72021-02-10 00:03:09 +00004243static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004244{
Jens Axboe9eac1902021-01-19 15:50:37 -07004245 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004246 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004247 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004248 struct file *file = NULL;
4249 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004250
Jens Axboe9eac1902021-01-19 15:50:37 -07004251 spin_lock(&files->file_lock);
4252 fdt = files_fdtable(files);
4253 if (close->fd >= fdt->max_fds) {
4254 spin_unlock(&files->file_lock);
4255 goto err;
4256 }
4257 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004258 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004259 spin_unlock(&files->file_lock);
4260 file = NULL;
4261 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004262 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004263
4264 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004265 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004266 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004267 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004268 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004269
Jens Axboe9eac1902021-01-19 15:50:37 -07004270 ret = __close_fd_get_file(close->fd, &file);
4271 spin_unlock(&files->file_lock);
4272 if (ret < 0) {
4273 if (ret == -ENOENT)
4274 ret = -EBADF;
4275 goto err;
4276 }
4277
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004278 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004279 ret = filp_close(file, current->files);
4280err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004281 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004282 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004283 if (file)
4284 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004285 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004286 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004287}
4288
Pavel Begunkov1155c762021-02-18 18:29:38 +00004289static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004290{
4291 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004292
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004293 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4294 return -EINVAL;
4295 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4296 return -EINVAL;
4297
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004298 req->sync.off = READ_ONCE(sqe->off);
4299 req->sync.len = READ_ONCE(sqe->len);
4300 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004301 return 0;
4302}
4303
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004304static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004305{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004306 int ret;
4307
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004308 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004309 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004310 return -EAGAIN;
4311
Jens Axboe9adbd452019-12-20 08:45:55 -07004312 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004313 req->sync.flags);
4314 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004315 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004316 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004317 return 0;
4318}
4319
YueHaibing469956e2020-03-04 15:53:52 +08004320#if defined(CONFIG_NET)
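/*
 * Network opcodes. The sendmsg/recvmsg handlers first try to make progress
 * inline with MSG_DONTWAIT; if that yields -EAGAIN, the parsed msghdr is
 * preserved in req->async_data by io_setup_async_msg() (including any
 * kmalloc'd iovec tracked in ->free_iov), so user memory only has to be
 * imported once before the request is retried.
 */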
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004321static int io_setup_async_msg(struct io_kiocb *req,
4322 struct io_async_msghdr *kmsg)
4323{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004324 struct io_async_msghdr *async_msg = req->async_data;
4325
4326 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004327 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004328 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004329 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004330 return -ENOMEM;
4331 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004332 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004333 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004334 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004335 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004336	/* if we're using fast_iov, set it to the new one */
4337 if (!async_msg->free_iov)
4338 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4339
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004340 return -EAGAIN;
4341}
4342
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004343static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4344 struct io_async_msghdr *iomsg)
4345{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004346 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004347 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004348 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004349 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004350}
4351
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004352static int io_sendmsg_prep_async(struct io_kiocb *req)
4353{
4354 int ret;
4355
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004356 ret = io_sendmsg_copy_hdr(req, req->async_data);
4357 if (!ret)
4358 req->flags |= REQ_F_NEED_CLEANUP;
4359 return ret;
4360}
4361
Jens Axboe3529d8c2019-12-19 18:24:38 -07004362static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004363{
Jens Axboee47293f2019-12-20 08:58:21 -07004364 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004365
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004366 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4367 return -EINVAL;
4368
Pavel Begunkov270a5942020-07-12 20:41:04 +03004369 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004370 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004371 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4372 if (sr->msg_flags & MSG_DONTWAIT)
4373 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004374
Jens Axboed8768362020-02-27 14:17:49 -07004375#ifdef CONFIG_COMPAT
4376 if (req->ctx->compat)
4377 sr->msg_flags |= MSG_CMSG_COMPAT;
4378#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004379 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004380}
4381
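/*
 * If a previous attempt already copied the msghdr (req->async_data is set),
 * reuse it; otherwise decode it onto the stack. With MSG_WAITALL the
 * request only counts as successful if the full iov count was sent, hence
 * the min_ret comparison before req_set_fail().
 */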
Pavel Begunkov889fca72021-02-10 00:03:09 +00004382static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004383{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004384 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004385 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004386 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004387 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004388 int ret;
4389
Florent Revestdba4a922020-12-04 12:36:04 +01004390 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004391 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004392 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004393
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004394 kmsg = req->async_data;
4395 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004396 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004397 if (ret)
4398 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004399 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004400 }
4401
Pavel Begunkov04411802021-04-01 15:44:00 +01004402 flags = req->sr_msg.msg_flags;
4403 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004404 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004405 if (flags & MSG_WAITALL)
4406 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4407
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004408 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004409 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004410 return io_setup_async_msg(req, kmsg);
4411 if (ret == -ERESTARTSYS)
4412 ret = -EINTR;
4413
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004414 /* fast path, check for non-NULL to avoid function call */
4415 if (kmsg->free_iov)
4416 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004417 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004418 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004419 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004420 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004421 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004422}
4423
Pavel Begunkov889fca72021-02-10 00:03:09 +00004424static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004425{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004426 struct io_sr_msg *sr = &req->sr_msg;
4427 struct msghdr msg;
4428 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004429 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004430 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004431 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004432 int ret;
4433
Florent Revestdba4a922020-12-04 12:36:04 +01004434 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004435 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004436 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004437
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004438 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4439 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004440 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004441
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004442 msg.msg_name = NULL;
4443 msg.msg_control = NULL;
4444 msg.msg_controllen = 0;
4445 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004446
Pavel Begunkov04411802021-04-01 15:44:00 +01004447 flags = req->sr_msg.msg_flags;
4448 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004449 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004450 if (flags & MSG_WAITALL)
4451 min_ret = iov_iter_count(&msg.msg_iter);
4452
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004453 msg.msg_flags = flags;
4454 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004455 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004456 return -EAGAIN;
4457 if (ret == -ERESTARTSYS)
4458 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004459
Stefan Metzmacher00312752021-03-20 20:33:36 +01004460 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004461 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004462 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004463 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004464}
4465
Pavel Begunkov1400e692020-07-12 20:41:05 +03004466static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4467 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004468{
4469 struct io_sr_msg *sr = &req->sr_msg;
4470 struct iovec __user *uiov;
4471 size_t iov_len;
4472 int ret;
4473
Pavel Begunkov1400e692020-07-12 20:41:05 +03004474 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4475 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004476 if (ret)
4477 return ret;
4478
4479 if (req->flags & REQ_F_BUFFER_SELECT) {
4480 if (iov_len > 1)
4481 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004482 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004483 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004484 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004485 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004486 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004487 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004488 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004489 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004490 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004491 if (ret > 0)
4492 ret = 0;
4493 }
4494
4495 return ret;
4496}
4497
4498#ifdef CONFIG_COMPAT
4499static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004500 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004501{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004502 struct io_sr_msg *sr = &req->sr_msg;
4503 struct compat_iovec __user *uiov;
4504 compat_uptr_t ptr;
4505 compat_size_t len;
4506 int ret;
4507
Pavel Begunkov4af34172021-04-11 01:46:30 +01004508 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4509 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004510 if (ret)
4511 return ret;
4512
4513 uiov = compat_ptr(ptr);
4514 if (req->flags & REQ_F_BUFFER_SELECT) {
4515 compat_ssize_t clen;
4516
4517 if (len > 1)
4518 return -EINVAL;
4519 if (!access_ok(uiov, sizeof(*uiov)))
4520 return -EFAULT;
4521 if (__get_user(clen, &uiov->iov_len))
4522 return -EFAULT;
4523 if (clen < 0)
4524 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004525 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004526 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004527 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004528 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004529 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004530 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004531 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004532 if (ret < 0)
4533 return ret;
4534 }
4535
4536 return 0;
4537}
Jens Axboe03b12302019-12-02 18:50:25 -07004538#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004539
Pavel Begunkov1400e692020-07-12 20:41:05 +03004540static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4541 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004542{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004543 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004544
4545#ifdef CONFIG_COMPAT
4546 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004547 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004548#endif
4549
Pavel Begunkov1400e692020-07-12 20:41:05 +03004550 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004551}
4552
Jens Axboebcda7ba2020-02-23 16:42:51 -07004553static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004554 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004555{
4556 struct io_sr_msg *sr = &req->sr_msg;
4557 struct io_buffer *kbuf;
4558
Jens Axboebcda7ba2020-02-23 16:42:51 -07004559 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4560 if (IS_ERR(kbuf))
4561 return kbuf;
4562
4563 sr->kbuf = kbuf;
4564 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004565 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004566}
4567
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004568static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4569{
4570 return io_put_kbuf(req, req->sr_msg.kbuf);
4571}
4572
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004573static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004574{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004575 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004576
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004577 ret = io_recvmsg_copy_hdr(req, req->async_data);
4578 if (!ret)
4579 req->flags |= REQ_F_NEED_CLEANUP;
4580 return ret;
4581}
4582
4583static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4584{
4585 struct io_sr_msg *sr = &req->sr_msg;
4586
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004587 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4588 return -EINVAL;
4589
Pavel Begunkov270a5942020-07-12 20:41:04 +03004590 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004591 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004592 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01004593 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4594 if (sr->msg_flags & MSG_DONTWAIT)
4595 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004596
Jens Axboed8768362020-02-27 14:17:49 -07004597#ifdef CONFIG_COMPAT
4598 if (req->ctx->compat)
4599 sr->msg_flags |= MSG_CMSG_COMPAT;
4600#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004601 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004602}
4603
Pavel Begunkov889fca72021-02-10 00:03:09 +00004604static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004605{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004606 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004607 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004608 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004609 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004610 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004611 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004612 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004613
Florent Revestdba4a922020-12-04 12:36:04 +01004614 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004615 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004616 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004617
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004618 kmsg = req->async_data;
4619 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004620 ret = io_recvmsg_copy_hdr(req, &iomsg);
4621 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004622 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004623 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004624 }
4625
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004626 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004627 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004628 if (IS_ERR(kbuf))
4629 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004630 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004631 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4632 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004633 1, req->sr_msg.len);
4634 }
4635
Pavel Begunkov04411802021-04-01 15:44:00 +01004636 flags = req->sr_msg.msg_flags;
4637 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004638 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004639 if (flags & MSG_WAITALL)
4640 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4641
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004642 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4643 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004644 if (force_nonblock && ret == -EAGAIN)
4645 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004646 if (ret == -ERESTARTSYS)
4647 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004648
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004649 if (req->flags & REQ_F_BUFFER_SELECTED)
4650 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004651 /* fast path, check for non-NULL to avoid function call */
4652 if (kmsg->free_iov)
4653 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004654 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004655 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004656 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004657 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004658 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004659}
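/*
 * A minimal userspace sketch of driving IORING_OP_RECVMSG through
 * liburing (#include <liburing.h>), assuming the io_uring_queue_init(),
 * io_uring_get_sqe(), io_uring_prep_recvmsg(), io_uring_submit(),
 * io_uring_wait_cqe() and io_uring_cqe_seen() helpers; "sockfd" and
 * "buf" are placeholder names, not part of this file:
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recvmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *
 * The CQE res field mirrors the ret value io_recvmsg() fills in above.
 */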
4660
Pavel Begunkov889fca72021-02-10 00:03:09 +00004661static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004662{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004663 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004664 struct io_sr_msg *sr = &req->sr_msg;
4665 struct msghdr msg;
4666 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004667 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004668 struct iovec iov;
4669 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004670 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004671 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004672 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004673
Florent Revestdba4a922020-12-04 12:36:04 +01004674 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004675 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004676 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004677
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004678 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004679 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004680 if (IS_ERR(kbuf))
4681 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004682 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004683 }
4684
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004685 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004686 if (unlikely(ret))
4687 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004688
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004689 msg.msg_name = NULL;
4690 msg.msg_control = NULL;
4691 msg.msg_controllen = 0;
4692 msg.msg_namelen = 0;
4693 msg.msg_iocb = NULL;
4694 msg.msg_flags = 0;
4695
Pavel Begunkov04411802021-04-01 15:44:00 +01004696 flags = req->sr_msg.msg_flags;
4697 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004698 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004699 if (flags & MSG_WAITALL)
4700 min_ret = iov_iter_count(&msg.msg_iter);
4701
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004702 ret = sock_recvmsg(sock, &msg, flags);
4703 if (force_nonblock && ret == -EAGAIN)
4704 return -EAGAIN;
4705 if (ret == -ERESTARTSYS)
4706 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004707out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004708 if (req->flags & REQ_F_BUFFER_SELECTED)
4709 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01004710 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004711 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004712 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004713 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004714}
4715
Jens Axboe3529d8c2019-12-19 18:24:38 -07004716static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004717{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004718 struct io_accept *accept = &req->accept;
4719
Jens Axboe14587a462020-09-05 11:36:08 -06004720 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004721 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004722 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004723 return -EINVAL;
4724
Jens Axboed55e5f52019-12-11 16:12:15 -07004725 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4726 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004727 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004728 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004729 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004730}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004731
Pavel Begunkov889fca72021-02-10 00:03:09 +00004732static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004733{
4734 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004735 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004736 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004737 int ret;
4738
Jiufei Xuee697dee2020-06-10 13:41:59 +08004739 if (req->file->f_flags & O_NONBLOCK)
4740 req->flags |= REQ_F_NOWAIT;
4741
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004742 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004743 accept->addr_len, accept->flags,
4744 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004745 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004746 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004747 if (ret < 0) {
4748 if (ret == -ERESTARTSYS)
4749 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004750 req_set_fail(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004751 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004752 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004753 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004754}
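/*
 * A matching userspace sketch for IORING_OP_ACCEPT, assuming liburing's
 * io_uring_prep_accept() and reusing a "ring" set up as in the recvmsg
 * sketch above; "listen_fd" is a placeholder for an already listening
 * socket:
 *
 *	struct sockaddr_storage ss;
 *	socklen_t sslen = sizeof(ss);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&ss,
 *			     &sslen, 0);
 *	io_uring_submit(&ring);
 *
 * On success the CQE res is the accepted file descriptor; on failure it
 * is the negative errno set by io_accept() above.
 */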
4755
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004756static int io_connect_prep_async(struct io_kiocb *req)
4757{
4758 struct io_async_connect *io = req->async_data;
4759 struct io_connect *conn = &req->connect;
4760
4761 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4762}
4763
Jens Axboe3529d8c2019-12-19 18:24:38 -07004764static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004765{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004766 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004767
Jens Axboe14587a462020-09-05 11:36:08 -06004768 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004769 return -EINVAL;
4770 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4771 return -EINVAL;
4772
Jens Axboe3529d8c2019-12-19 18:24:38 -07004773 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4774 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004775 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004776}
4777
Pavel Begunkov889fca72021-02-10 00:03:09 +00004778static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004779{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004780 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004781 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004782 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004783 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004784
Jens Axboee8c2bc12020-08-15 18:44:09 -07004785 if (req->async_data) {
4786 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004787 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004788 ret = move_addr_to_kernel(req->connect.addr,
4789 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004790 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004791 if (ret)
4792 goto out;
4793 io = &__io;
4794 }
4795
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004796 file_flags = force_nonblock ? O_NONBLOCK : 0;
4797
Jens Axboee8c2bc12020-08-15 18:44:09 -07004798 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004799 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004800 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004801 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004802 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004803 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004804 ret = -ENOMEM;
4805 goto out;
4806 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004807 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004808 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004809 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004810 if (ret == -ERESTARTSYS)
4811 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004812out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004813 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004814 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004815 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004816 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004817}
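/*
 * A userspace sketch for IORING_OP_CONNECT, assuming liburing's
 * io_uring_prep_connect(); "fd" and "sa" (a filled-in sockaddr_in) are
 * placeholders, and "ring" is reused from the earlier sketches:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_connect(sqe, fd, (struct sockaddr *)&sa, sizeof(sa));
 *	io_uring_submit(&ring);
 *
 * A nonblocking issue may see -EINPROGRESS internally; io_connect() above
 * then stashes the address in async data so the request can be retried.
 */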
YueHaibing469956e2020-03-04 15:53:52 +08004818#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004819#define IO_NETOP_FN(op) \
4820static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4821{ \
4822 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004823}
4824
Jens Axboe99a10082021-02-19 09:35:19 -07004825#define IO_NETOP_PREP(op) \
4826IO_NETOP_FN(op) \
4827static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4828{ \
4829 return -EOPNOTSUPP; \
4830} \
4831
4832#define IO_NETOP_PREP_ASYNC(op) \
4833IO_NETOP_PREP(op) \
4834static int io_##op##_prep_async(struct io_kiocb *req) \
4835{ \
4836 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004837}
4838
Jens Axboe99a10082021-02-19 09:35:19 -07004839IO_NETOP_PREP_ASYNC(sendmsg);
4840IO_NETOP_PREP_ASYNC(recvmsg);
4841IO_NETOP_PREP_ASYNC(connect);
4842IO_NETOP_PREP(accept);
4843IO_NETOP_FN(send);
4844IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004845#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004846
Jens Axboed7718a92020-02-14 22:23:12 -07004847struct io_poll_table {
4848 struct poll_table_struct pt;
4849 struct io_kiocb *req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01004850 int nr_entries;
Jens Axboed7718a92020-02-14 22:23:12 -07004851 int error;
4852};
4853
Jens Axboed7718a92020-02-14 22:23:12 -07004854static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004855 __poll_t mask, io_req_tw_func_t func)
Jens Axboed7718a92020-02-14 22:23:12 -07004856{
Jens Axboed7718a92020-02-14 22:23:12 -07004857	/* for instances that support it, check for an event match first: */
4858 if (mask && !(mask & poll->events))
4859 return 0;
4860
4861 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4862
4863 list_del_init(&poll->wait.entry);
4864
Jens Axboed7718a92020-02-14 22:23:12 -07004865 req->result = mask;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004866 req->io_task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004867
Jens Axboed7718a92020-02-14 22:23:12 -07004868 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004869 * If this fails, then the task is exiting. When a task exits, the
4870 * work gets canceled, so just cancel this request as well instead
4871 * of executing it. We can't safely execute it anyway, as we may not
4872	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07004873 */
Pavel Begunkove09ee512021-07-01 13:26:05 +01004874 io_req_task_work_add(req);
Jens Axboed7718a92020-02-14 22:23:12 -07004875 return 1;
4876}
4877
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004878static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4879 __acquires(&req->ctx->completion_lock)
4880{
4881 struct io_ring_ctx *ctx = req->ctx;
4882
Pavel Begunkove09ee512021-07-01 13:26:05 +01004883 if (unlikely(req->task->flags & PF_EXITING))
4884 WRITE_ONCE(poll->canceled, true);
4885
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004886 if (!req->result && !READ_ONCE(poll->canceled)) {
4887 struct poll_table_struct pt = { ._key = poll->events };
4888
4889 req->result = vfs_poll(req->file, &pt) & poll->events;
4890 }
4891
4892 spin_lock_irq(&ctx->completion_lock);
4893 if (!req->result && !READ_ONCE(poll->canceled)) {
4894 add_wait_queue(poll->head, &poll->wait);
4895 return true;
4896 }
4897
4898 return false;
4899}
4900
Jens Axboed4e7cd32020-08-15 11:44:50 -07004901static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004902{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004903 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004904 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004905 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004906 return req->apoll->double_poll;
4907}
4908
4909static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4910{
4911 if (req->opcode == IORING_OP_POLL_ADD)
4912 return &req->poll;
4913 return &req->apoll->poll;
4914}
4915
4916static void io_poll_remove_double(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004917 __must_hold(&req->ctx->completion_lock)
Jens Axboed4e7cd32020-08-15 11:44:50 -07004918{
4919 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004920
4921 lockdep_assert_held(&req->ctx->completion_lock);
4922
4923 if (poll && poll->head) {
4924 struct wait_queue_head *head = poll->head;
4925
4926 spin_lock(&head->lock);
4927 list_del_init(&poll->wait.entry);
4928 if (poll->wait.private)
Jens Axboede9b4cc2021-02-24 13:28:27 -07004929 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004930 poll->head = NULL;
4931 spin_unlock(&head->lock);
4932 }
4933}
4934
Pavel Begunkove27414b2021-04-09 09:13:20 +01004935static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004936 __must_hold(&req->ctx->completion_lock)
Jens Axboe18bceab2020-05-15 11:56:54 -06004937{
4938 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004939 unsigned flags = IORING_CQE_F_MORE;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004940 int error;
Jens Axboe18bceab2020-05-15 11:56:54 -06004941
Pavel Begunkove27414b2021-04-09 09:13:20 +01004942 if (READ_ONCE(req->poll.canceled)) {
Jens Axboe45ab03b2021-02-23 08:19:33 -07004943 error = -ECANCELED;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004944 req->poll.events |= EPOLLONESHOT;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004945 } else {
Jens Axboe50826202021-02-23 09:02:26 -07004946 error = mangle_poll(mask);
Pavel Begunkove27414b2021-04-09 09:13:20 +01004947 }
Jens Axboeb69de282021-03-17 08:37:41 -06004948 if (req->poll.events & EPOLLONESHOT)
4949 flags = 0;
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01004950 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
Jens Axboe88e41cf2021-02-22 22:08:01 -07004951 req->poll.done = true;
4952 flags = 0;
4953 }
Hao Xu7b289c32021-04-13 15:20:39 +08004954 if (flags & IORING_CQE_F_MORE)
4955 ctx->cq_extra++;
4956
Jens Axboe18bceab2020-05-15 11:56:54 -06004957 io_commit_cqring(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004958 return !(flags & IORING_CQE_F_MORE);
Jens Axboe18bceab2020-05-15 11:56:54 -06004959}
4960
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004961static void io_poll_task_func(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004962{
Jens Axboe6d816e02020-08-11 08:04:14 -06004963 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004964 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004965
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004966 if (io_poll_rewait(req, &req->poll)) {
4967 spin_unlock_irq(&ctx->completion_lock);
4968 } else {
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004969 bool done;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004970
Pavel Begunkove27414b2021-04-09 09:13:20 +01004971 done = io_poll_complete(req, req->result);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004972 if (done) {
Hao Xua890d012021-07-28 11:03:22 +08004973 io_poll_remove_double(req);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004974 hash_del(&req->hash_node);
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004975 } else {
Jens Axboe88e41cf2021-02-22 22:08:01 -07004976 req->result = 0;
4977 add_wait_queue(req->poll.head, &req->poll.wait);
4978 }
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004979 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004980 io_cqring_ev_posted(ctx);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004981
Jens Axboe88e41cf2021-02-22 22:08:01 -07004982 if (done) {
4983 nxt = io_put_req_find_next(req);
4984 if (nxt)
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004985 io_req_task_submit(nxt);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004986 }
Pavel Begunkovea1164e2020-06-30 15:20:41 +03004987 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004988}
4989
4990static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4991 int sync, void *key)
4992{
4993 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004994 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004995 __poll_t mask = key_to_poll(key);
4996
4997	/* for instances that support it, check for an event match first: */
4998 if (mask && !(mask & poll->events))
4999 return 0;
Jens Axboe88e41cf2021-02-22 22:08:01 -07005000 if (!(poll->events & EPOLLONESHOT))
5001 return poll->wait.func(&poll->wait, mode, sync, key);
Jens Axboe18bceab2020-05-15 11:56:54 -06005002
Jens Axboe8706e042020-09-28 08:38:54 -06005003 list_del_init(&wait->entry);
5004
Jens Axboe9ce85ef2021-07-09 08:20:28 -06005005 if (poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005006 bool done;
5007
Jens Axboe807abcb2020-07-17 17:09:27 -06005008 spin_lock(&poll->head->lock);
5009 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06005010 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06005011 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005012 /* make sure double remove sees this as being gone */
5013 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06005014 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06005015 if (!done) {
5016 /* use wait func handler, so it matches the rq type */
5017 poll->wait.func(&poll->wait, mode, sync, key);
5018 }
Jens Axboe18bceab2020-05-15 11:56:54 -06005019 }
Jens Axboede9b4cc2021-02-24 13:28:27 -07005020 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005021 return 1;
5022}
5023
5024static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5025 wait_queue_func_t wake_func)
5026{
5027 poll->head = NULL;
5028 poll->done = false;
5029 poll->canceled = false;
Jens Axboe464dca62021-03-19 14:06:24 -06005030#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
5031 /* mask in events that we always want/need */
5032 poll->events = events | IO_POLL_UNMASK;
Jens Axboe18bceab2020-05-15 11:56:54 -06005033 INIT_LIST_HEAD(&poll->wait.entry);
5034 init_waitqueue_func_entry(&poll->wait, wake_func);
5035}
5036
5037static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06005038 struct wait_queue_head *head,
5039 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06005040{
5041 struct io_kiocb *req = pt->req;
5042
5043 /*
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005044 * The file being polled uses multiple waitqueues for poll handling
5045	 * (e.g. one for read, one for write). Set up a separate io_poll_iocb
5046 * if this happens.
Jens Axboe18bceab2020-05-15 11:56:54 -06005047 */
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005048 if (unlikely(pt->nr_entries)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005049 struct io_poll_iocb *poll_one = poll;
5050
Jens Axboe18bceab2020-05-15 11:56:54 -06005051 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005052 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005053 pt->error = -EINVAL;
5054 return;
5055 }
Jens Axboeea6a693d2021-04-15 09:47:13 -06005056 /*
5057 * Can't handle multishot for double wait for now, turn it
5058 * into one-shot mode.
5059 */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005060 if (!(poll_one->events & EPOLLONESHOT))
5061 poll_one->events |= EPOLLONESHOT;
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005062 /* double add on the same waitqueue head, ignore */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005063 if (poll_one->head == head)
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005064 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005065 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5066 if (!poll) {
5067 pt->error = -ENOMEM;
5068 return;
5069 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005070 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboede9b4cc2021-02-24 13:28:27 -07005071 req_ref_get(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005072 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005073 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005074 }
5075
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005076 pt->nr_entries++;
Jens Axboe18bceab2020-05-15 11:56:54 -06005077 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005078
5079 if (poll->events & EPOLLEXCLUSIVE)
5080 add_wait_queue_exclusive(head, &poll->wait);
5081 else
5082 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005083}
5084
5085static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5086 struct poll_table_struct *p)
5087{
5088 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005089 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005090
Jens Axboe807abcb2020-07-17 17:09:27 -06005091 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005092}
5093
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01005094static void io_async_task_func(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005095{
Jens Axboed7718a92020-02-14 22:23:12 -07005096 struct async_poll *apoll = req->apoll;
5097 struct io_ring_ctx *ctx = req->ctx;
5098
Olivier Langlois236daeae2021-05-31 02:36:37 -04005099 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
Jens Axboed7718a92020-02-14 22:23:12 -07005100
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005101 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005102 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005103 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005104 }
5105
Pavel Begunkov0ea13b42021-04-09 09:13:21 +01005106 hash_del(&req->hash_node);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005107 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005108 spin_unlock_irq(&ctx->completion_lock);
5109
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005110 if (!READ_ONCE(apoll->poll.canceled))
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01005111 io_req_task_submit(req);
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005112 else
Pavel Begunkov25935532021-03-19 17:22:40 +00005113 io_req_complete_failed(req, -ECANCELED);
Jens Axboed7718a92020-02-14 22:23:12 -07005114}
5115
5116static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5117 void *key)
5118{
5119 struct io_kiocb *req = wait->private;
5120 struct io_poll_iocb *poll = &req->apoll->poll;
5121
5122 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5123 key_to_poll(key));
5124
5125 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5126}
5127
5128static void io_poll_req_insert(struct io_kiocb *req)
5129{
5130 struct io_ring_ctx *ctx = req->ctx;
5131 struct hlist_head *list;
5132
5133 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5134 hlist_add_head(&req->hash_node, list);
5135}
5136
5137static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5138 struct io_poll_iocb *poll,
5139 struct io_poll_table *ipt, __poll_t mask,
5140 wait_queue_func_t wake_func)
5141 __acquires(&ctx->completion_lock)
5142{
5143 struct io_ring_ctx *ctx = req->ctx;
5144 bool cancel = false;
5145
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005146 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005147 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005148 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005149 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005150
5151 ipt->pt._key = mask;
5152 ipt->req = req;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005153 ipt->error = 0;
5154 ipt->nr_entries = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005155
Jens Axboed7718a92020-02-14 22:23:12 -07005156 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
Pavel Begunkov68b11e82021-07-20 10:50:43 +01005157 if (unlikely(!ipt->nr_entries) && !ipt->error)
5158 ipt->error = -EINVAL;
Jens Axboed7718a92020-02-14 22:23:12 -07005159
5160 spin_lock_irq(&ctx->completion_lock);
Hao Xua890d012021-07-28 11:03:22 +08005161 if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
Pavel Begunkov46fee9a2021-07-20 10:50:44 +01005162 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005163 if (likely(poll->head)) {
5164 spin_lock(&poll->head->lock);
5165 if (unlikely(list_empty(&poll->wait.entry))) {
5166 if (ipt->error)
5167 cancel = true;
5168 ipt->error = 0;
5169 mask = 0;
5170 }
Jens Axboe88e41cf2021-02-22 22:08:01 -07005171 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
Jens Axboed7718a92020-02-14 22:23:12 -07005172 list_del_init(&poll->wait.entry);
5173 else if (cancel)
5174 WRITE_ONCE(poll->canceled, true);
5175 else if (!poll->done) /* actually waiting for an event */
5176 io_poll_req_insert(req);
5177 spin_unlock(&poll->head->lock);
5178 }
5179
5180 return mask;
5181}
5182
Olivier Langlois59b735a2021-06-22 05:17:39 -07005183enum {
5184 IO_APOLL_OK,
5185 IO_APOLL_ABORTED,
5186 IO_APOLL_READY
5187};
5188
5189static int io_arm_poll_handler(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005190{
5191 const struct io_op_def *def = &io_op_defs[req->opcode];
5192 struct io_ring_ctx *ctx = req->ctx;
5193 struct async_poll *apoll;
5194 struct io_poll_table ipt;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005195 __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005196 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005197
5198 if (!req->file || !file_can_poll(req->file))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005199 return IO_APOLL_ABORTED;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005200 if (req->flags & REQ_F_POLLED)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005201 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005202 if (!def->pollin && !def->pollout)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005203 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005204
5205 if (def->pollin) {
5206 rw = READ;
5207 mask |= POLLIN | POLLRDNORM;
5208
5209 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5210 if ((req->opcode == IORING_OP_RECVMSG) &&
5211 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5212 mask &= ~POLLIN;
5213 } else {
5214 rw = WRITE;
5215 mask |= POLLOUT | POLLWRNORM;
5216 }
5217
Jens Axboe9dab14b2020-08-25 12:27:50 -06005218	/* if we can't do a nonblocking try, there's no point arming a poll handler */
Pavel Begunkovb191e2d2021-08-09 13:04:03 +01005219 if (!io_file_supports_nowait(req, rw))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005220 return IO_APOLL_ABORTED;
Jens Axboed7718a92020-02-14 22:23:12 -07005221
5222 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5223 if (unlikely(!apoll))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005224 return IO_APOLL_ABORTED;
Jens Axboe807abcb2020-07-17 17:09:27 -06005225 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005226 req->apoll = apoll;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005227 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005228 ipt.pt._qproc = io_async_queue_proc;
5229
5230 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5231 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005232 if (ret || ipt.error) {
Jens Axboed7718a92020-02-14 22:23:12 -07005233 spin_unlock_irq(&ctx->completion_lock);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005234 if (ret)
5235 return IO_APOLL_READY;
5236 return IO_APOLL_ABORTED;
Jens Axboed7718a92020-02-14 22:23:12 -07005237 }
5238 spin_unlock_irq(&ctx->completion_lock);
Olivier Langlois236daeae2021-05-31 02:36:37 -04005239 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5240 mask, apoll->poll.events);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005241 return IO_APOLL_OK;
Jens Axboed7718a92020-02-14 22:23:12 -07005242}
5243
5244static bool __io_poll_remove_one(struct io_kiocb *req,
Jens Axboeb2e720a2021-03-31 09:03:03 -06005245 struct io_poll_iocb *poll, bool do_cancel)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005246 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005247{
Jens Axboeb41e9852020-02-17 09:52:41 -07005248 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005249
Jens Axboe50826202021-02-23 09:02:26 -07005250 if (!poll->head)
5251 return false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005252 spin_lock(&poll->head->lock);
Jens Axboeb2e720a2021-03-31 09:03:03 -06005253 if (do_cancel)
5254 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005255 if (!list_empty(&poll->wait.entry)) {
5256 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005257 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005258 }
5259 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005260 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005261 return do_complete;
5262}
5263
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005264static bool io_poll_remove_waitqs(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005265 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005266{
5267 bool do_complete;
5268
Jens Axboed4e7cd32020-08-15 11:44:50 -07005269 io_poll_remove_double(req);
Pavel Begunkove31001a2021-04-13 02:58:43 +01005270 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005271
Pavel Begunkove31001a2021-04-13 02:58:43 +01005272 if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005273 /* non-poll requests have submit ref still */
Pavel Begunkove31001a2021-04-13 02:58:43 +01005274 req_ref_put(req);
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005275 }
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005276 return do_complete;
5277}
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005278
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005279static bool io_poll_remove_one(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005280 __must_hold(&req->ctx->completion_lock)
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005281{
5282 bool do_complete;
5283
5284 do_complete = io_poll_remove_waitqs(req);
Jens Axboeb41e9852020-02-17 09:52:41 -07005285 if (do_complete) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005286 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
Jens Axboeb41e9852020-02-17 09:52:41 -07005287 io_commit_cqring(req->ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005288 req_set_fail(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005289 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005290 }
5291
5292 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005293}
5294
Jens Axboe76e1b642020-09-26 15:05:03 -06005295/*
5296 * Returns true if we found and killed one or more poll requests
5297 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005298static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005299 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005300{
Jens Axboe78076bb2019-12-04 19:56:40 -07005301 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005302 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005303 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005304
5305 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005306 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5307 struct hlist_head *list;
5308
5309 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005310 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005311 if (io_match_task(req, tsk, cancel_all))
Jens Axboef3606e32020-09-22 08:18:24 -06005312 posted += io_poll_remove_one(req);
5313 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005314 }
5315 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005316
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005317 if (posted)
5318 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005319
5320 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005321}
5322
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005323static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5324 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005325 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005326{
Jens Axboe78076bb2019-12-04 19:56:40 -07005327 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005328 struct io_kiocb *req;
5329
Jens Axboe78076bb2019-12-04 19:56:40 -07005330 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5331 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005332 if (sqe_addr != req->user_data)
5333 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005334 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5335 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005336 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005337 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005338 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07005339}
5340
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005341static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5342 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005343 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005344{
5345 struct io_kiocb *req;
5346
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005347 req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005348 if (!req)
5349 return -ENOENT;
5350 if (io_poll_remove_one(req))
5351 return 0;
5352
5353 return -EALREADY;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005354}
5355
Pavel Begunkov9096af32021-04-14 13:38:36 +01005356static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5357 unsigned int flags)
5358{
5359 u32 events;
5360
5361 events = READ_ONCE(sqe->poll32_events);
5362#ifdef __BIG_ENDIAN
5363 events = swahw32(events);
5364#endif
5365 if (!(flags & IORING_POLL_ADD_MULTI))
5366 events |= EPOLLONESHOT;
5367 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5368}
5369
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005370static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005371 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005372{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005373 struct io_poll_update *upd = &req->poll_update;
5374 u32 flags;
5375
Jens Axboe221c5eb2019-01-17 09:41:58 -07005376 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5377 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005378 if (sqe->ioprio || sqe->buf_index)
5379 return -EINVAL;
5380 flags = READ_ONCE(sqe->len);
5381 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5382 IORING_POLL_ADD_MULTI))
5383 return -EINVAL;
5384 /* meaningless without update */
5385 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005386 return -EINVAL;
5387
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005388 upd->old_user_data = READ_ONCE(sqe->addr);
5389 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5390 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005391
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005392 upd->new_user_data = READ_ONCE(sqe->off);
5393 if (!upd->update_user_data && upd->new_user_data)
5394 return -EINVAL;
5395 if (upd->update_events)
5396 upd->events = io_poll_parse_events(sqe, flags);
5397 else if (sqe->poll32_events)
5398 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005399
Jens Axboe221c5eb2019-01-17 09:41:58 -07005400 return 0;
5401}
5402
Jens Axboe221c5eb2019-01-17 09:41:58 -07005403static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5404 void *key)
5405{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005406 struct io_kiocb *req = wait->private;
5407 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005408
Jens Axboed7718a92020-02-14 22:23:12 -07005409 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005410}
5411
Jens Axboe221c5eb2019-01-17 09:41:58 -07005412static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5413 struct poll_table_struct *p)
5414{
5415 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5416
Jens Axboee8c2bc12020-08-15 18:44:09 -07005417 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005418}
5419
Jens Axboe3529d8c2019-12-19 18:24:38 -07005420static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005421{
5422 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005423 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005424
5425 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5426 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005427 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005428 return -EINVAL;
5429 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005430 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005431 return -EINVAL;
5432
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005433 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005434 return 0;
5435}
5436
Pavel Begunkov61e98202021-02-10 00:03:08 +00005437static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005438{
5439 struct io_poll_iocb *poll = &req->poll;
5440 struct io_ring_ctx *ctx = req->ctx;
5441 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005442 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005443
Jens Axboed7718a92020-02-14 22:23:12 -07005444 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005445
Jens Axboed7718a92020-02-14 22:23:12 -07005446 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5447 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005448
Jens Axboe8c838782019-03-12 15:48:16 -06005449 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005450 ipt.error = 0;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005451 io_poll_complete(req, mask);
Jens Axboe8c838782019-03-12 15:48:16 -06005452 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005453 spin_unlock_irq(&ctx->completion_lock);
5454
Jens Axboe8c838782019-03-12 15:48:16 -06005455 if (mask) {
5456 io_cqring_ev_posted(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005457 if (poll->events & EPOLLONESHOT)
5458 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005459 }
Jens Axboe8c838782019-03-12 15:48:16 -06005460 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005461}
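/*
 * A userspace sketch of arming an explicit poll request, assuming
 * liburing's io_uring_prep_poll_add(); "fd" is a placeholder descriptor
 * and "ring" is reused from the earlier sketches. The multishot flag
 * travels in sqe->len, matching io_poll_add_prep() above (newer liburing
 * also wraps this as io_uring_prep_poll_multishot()):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->len = IORING_POLL_ADD_MULTI;
 *	sqe->user_data = 0x1234;
 *	io_uring_submit(&ring);
 *
 * Without IORING_POLL_ADD_MULTI the kernel ORs in EPOLLONESHOT (see
 * io_poll_parse_events()) and a single CQE terminates the request.
 */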
5462
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005463static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06005464{
5465 struct io_ring_ctx *ctx = req->ctx;
5466 struct io_kiocb *preq;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005467 bool completing;
Jens Axboeb69de282021-03-17 08:37:41 -06005468 int ret;
5469
5470 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005471 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Jens Axboeb69de282021-03-17 08:37:41 -06005472 if (!preq) {
5473 ret = -ENOENT;
5474 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005475 }
Jens Axboecb3b200e2021-04-06 09:49:31 -06005476
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005477 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5478 completing = true;
5479 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5480 goto err;
5481 }
5482
Jens Axboecb3b200e2021-04-06 09:49:31 -06005483 /*
5484 * Don't allow racy completion with singleshot, as we cannot safely
5485 * update those. For multishot, if we're racing with completion, just
5486 * let completion re-add it.
5487 */
5488 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5489 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5490 ret = -EALREADY;
5491 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005492 }
5493 /* we now have a detached poll request. reissue. */
5494 ret = 0;
5495err:
Jens Axboeb69de282021-03-17 08:37:41 -06005496 if (ret < 0) {
Jens Axboecb3b200e2021-04-06 09:49:31 -06005497 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005498 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06005499 io_req_complete(req, ret);
5500 return 0;
5501 }
5502	/* only replace the event mask bits, keep the behavior flags */
Pavel Begunkov9d805892021-04-13 02:58:40 +01005503 if (req->poll_update.update_events) {
Jens Axboeb69de282021-03-17 08:37:41 -06005504 preq->poll.events &= ~0xffff;
Pavel Begunkov9d805892021-04-13 02:58:40 +01005505 preq->poll.events |= req->poll_update.events & 0xffff;
Jens Axboeb69de282021-03-17 08:37:41 -06005506 preq->poll.events |= IO_POLL_UNMASK;
5507 }
Pavel Begunkov9d805892021-04-13 02:58:40 +01005508 if (req->poll_update.update_user_data)
5509 preq->user_data = req->poll_update.new_user_data;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005510 spin_unlock_irq(&ctx->completion_lock);
5511
Jens Axboeb69de282021-03-17 08:37:41 -06005512 /* complete update request, we're done with it */
5513 io_req_complete(req, ret);
5514
Jens Axboecb3b200e2021-04-06 09:49:31 -06005515 if (!completing) {
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005516 ret = io_poll_add(preq, issue_flags);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005517 if (ret < 0) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005518 req_set_fail(preq);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005519 io_req_complete(preq, ret);
5520 }
Jens Axboeb69de282021-03-17 08:37:41 -06005521 }
5522 return 0;
5523}
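/*
 * A userspace sketch of updating an armed poll request via
 * IORING_OP_POLL_REMOVE, assuming liburing's io_uring_prep_poll_update()
 * helper (its argument types differ between liburing releases); the
 * user_data values 0x1234/0x5678 are placeholders matching the poll_add
 * sketch above:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_update(sqe, 0x1234, 0x5678, POLLIN | POLLOUT,
 *				  IORING_POLL_UPDATE_EVENTS |
 *				  IORING_POLL_UPDATE_USER_DATA);
 *	io_uring_submit(&ring);
 *
 * The flags end up in sqe->len and are decoded by io_poll_update_prep().
 */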
5524
Jens Axboe5262f562019-09-17 12:26:57 -06005525static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5526{
Jens Axboead8a48a2019-11-15 08:49:11 -07005527 struct io_timeout_data *data = container_of(timer,
5528 struct io_timeout_data, timer);
5529 struct io_kiocb *req = data->req;
5530 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005531 unsigned long flags;
5532
Jens Axboe5262f562019-09-17 12:26:57 -06005533 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005534 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005535 atomic_set(&req->ctx->cq_timeouts,
5536 atomic_read(&req->ctx->cq_timeouts) + 1);
5537
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005538 io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
Jens Axboe5262f562019-09-17 12:26:57 -06005539 io_commit_cqring(ctx);
5540 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5541
5542 io_cqring_ev_posted(ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005543 req_set_fail(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005544 io_put_req(req);
5545 return HRTIMER_NORESTART;
5546}
5547
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005548static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5549 __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005550 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005551{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005552 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005553 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005554 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06005555
5556 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005557 found = user_data == req->user_data;
5558 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06005559 break;
Jens Axboef254ac02020-08-12 17:33:30 -06005560 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005561 if (!found)
5562 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06005563
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005564 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005565 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005566 return ERR_PTR(-EALREADY);
5567 list_del_init(&req->timeout.list);
5568 return req;
5569}
5570
5571static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005572 __must_hold(&ctx->completion_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005573{
5574 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5575
5576 if (IS_ERR(req))
5577 return PTR_ERR(req);
5578
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005579 req_set_fail(req);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005580 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005581 io_put_req_deferred(req, 1);
5582 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005583}
5584
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005585static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5586 struct timespec64 *ts, enum hrtimer_mode mode)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005587 __must_hold(&ctx->completion_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005588{
5589 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5590 struct io_timeout_data *data;
5591
5592 if (IS_ERR(req))
5593 return PTR_ERR(req);
5594
5595 req->timeout.off = 0; /* noseq */
5596 data = req->async_data;
5597 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5598 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5599 data->timer.function = io_timeout_fn;
5600 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5601 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005602}
5603
Jens Axboe3529d8c2019-12-19 18:24:38 -07005604static int io_timeout_remove_prep(struct io_kiocb *req,
5605 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005606{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005607 struct io_timeout_rem *tr = &req->timeout_rem;
5608
Jens Axboeb29472e2019-12-17 18:50:29 -07005609 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5610 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005611 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5612 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005613 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005614 return -EINVAL;
5615
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005616 tr->addr = READ_ONCE(sqe->addr);
5617 tr->flags = READ_ONCE(sqe->timeout_flags);
5618 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5619 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5620 return -EINVAL;
5621 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5622 return -EFAULT;
5623 } else if (tr->flags) {
5624 /* timeout removal doesn't support flags */
5625 return -EINVAL;
5626 }
5627
Jens Axboeb29472e2019-12-17 18:50:29 -07005628 return 0;
5629}
5630
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005631static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5632{
5633 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5634 : HRTIMER_MODE_REL;
5635}
5636
Jens Axboe11365042019-10-16 09:08:32 -06005637/*
5638 * Remove or update an existing timeout command
5639 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005640static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005641{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005642 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005643 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005644 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005645
Jens Axboe11365042019-10-16 09:08:32 -06005646 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005647 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005648 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005649 else
5650 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5651 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005652
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005653 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06005654 io_commit_cqring(ctx);
5655 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005656 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005657 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005658 req_set_fail(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005659 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005660 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005661}
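/*
 * A userspace sketch of the timeout opcodes, assuming liburing's
 * io_uring_prep_timeout() and io_uring_prep_timeout_remove(); "ring" is
 * reused from the earlier sketches:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 *	sqe->user_data = 0xcafe;
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_timeout_remove(sqe, 0xcafe, 0);
 *	io_uring_submit(&ring);
 *
 * With a count of 0 and no IORING_TIMEOUT_ABS this is a pure relative
 * timeout. The removal CQE carries the io_timeout_cancel() return value,
 * and the original timeout then completes with -ECANCELED, not -ETIME.
 */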
5662
static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   bool is_timeout_link)
{
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~IORING_TIMEOUT_ABS)
		return -EINVAL;

	req->timeout.off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;

	if (!req->async_data && io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	if (is_timeout_link)
		io_req_track_inflight(req);
	return 0;
}

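/*
 * Queue up an IORING_OP_TIMEOUT request. From userspace this is normally
 * driven through liburing; an illustrative sketch (assuming liburing's
 * io_uring_prep_timeout() helper, not part of this file):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);
 *	io_uring_submit(&ring);
 *
 * A zero count (sqe->off) gives a pure relative timeout; a non-zero count
 * completes the request early once that many completions have posted.
 */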
static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = req->timeout.off;

	spin_lock_irq(&ctx->completion_lock);

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * event to be satisfied. If it isn't set, then this is a pure
	 * timeout request and the sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	req->timeout.target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
						  timeout.list);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nxt->timeout.target_seq - tail)
			break;
	}
add:
	list_add(&req->timeout.list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->completion_lock);
	return 0;
}

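/*
 * Async cancelation matches work items by both the owning ring and the
 * request's user_data, so identical user_data values on different rings
 * don't cancel each other.
 */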
struct io_cancel_data {
	struct io_ring_ctx *ctx;
	u64 user_data;
};

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return req->ctx == cd->ctx && req->user_data == cd->user_data;
}

static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
			       struct io_ring_ctx *ctx)
{
	struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
	enum io_wq_cancel cancel_ret;
	int ret = 0;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

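/*
 * Try to cancel a request wherever it may currently be queued: first the
 * task's io-wq, then the timeout list, then the poll hash. The result is
 * posted as a CQE against @req.
 */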
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
				     struct io_kiocb *req, __u64 sqe_addr,
				     int success_ret)
{
	unsigned long flags;
	int ret;

	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
	spin_lock_irqsave(&ctx->completion_lock, flags);
	if (ret != -ENOENT)
		goto done;
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr, false);
done:
	if (!ret)
		ret = success_ret;
	io_cqring_fill_event(ctx, req->user_data, ret, 0);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail(req);
}

static int io_async_cancel_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
		return -EINVAL;

	req->cancel.addr = READ_ONCE(sqe->addr);
	return 0;
}

static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 sqe_addr = req->cancel.addr;
	struct io_tctx_node *node;
	int ret;

	/* tasks should wait for their io-wq threads, so safe w/o sync */
	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
	spin_lock_irq(&ctx->completion_lock);
	if (ret != -ENOENT)
		goto done;
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr, false);
	if (ret != -ENOENT)
		goto done;
	spin_unlock_irq(&ctx->completion_lock);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
		if (ret != -ENOENT)
			break;
	}
	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));

	spin_lock_irq(&ctx->completion_lock);
done:
	io_cqring_fill_event(ctx, req->user_data, ret, 0);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail(req);
	io_put_req(req);
	return 0;
}

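/* IORING_OP_FILES_UPDATE: update the registered file table from within the ring */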
static int io_rsrc_update_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	req->rsrc_update.offset = READ_ONCE(sqe->off);
	req->rsrc_update.nr_args = READ_ONCE(sqe->len);
	if (!req->rsrc_update.nr_args)
		return -EINVAL;
	req->rsrc_update.arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	up.offset = req->rsrc_update.offset;
	up.data = req->rsrc_update.arg;
	up.nr = 0;
	up.tags = 0;
	up.resv = 0;

	mutex_lock(&ctx->uring_lock);
	ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
					&up, req->rsrc_update.nr_args);
	mutex_unlock(&ctx->uring_lock);

	if (ret < 0)
		req_set_fail(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

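/*
 * Per-opcode submission-time preparation, called while the SQE is still
 * valid; anything needed after submission must be copied out of it here.
 */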
static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	switch (req->opcode) {
	case IORING_OP_NOP:
		return 0;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		return io_read_prep(req, sqe);
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		return io_write_prep(req, sqe);
	case IORING_OP_POLL_ADD:
		return io_poll_add_prep(req, sqe);
	case IORING_OP_POLL_REMOVE:
		return io_poll_update_prep(req, sqe);
	case IORING_OP_FSYNC:
		return io_fsync_prep(req, sqe);
	case IORING_OP_SYNC_FILE_RANGE:
		return io_sfr_prep(req, sqe);
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		return io_sendmsg_prep(req, sqe);
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		return io_recvmsg_prep(req, sqe);
	case IORING_OP_CONNECT:
		return io_connect_prep(req, sqe);
	case IORING_OP_TIMEOUT:
		return io_timeout_prep(req, sqe, false);
	case IORING_OP_TIMEOUT_REMOVE:
		return io_timeout_remove_prep(req, sqe);
	case IORING_OP_ASYNC_CANCEL:
		return io_async_cancel_prep(req, sqe);
	case IORING_OP_LINK_TIMEOUT:
		return io_timeout_prep(req, sqe, true);
	case IORING_OP_ACCEPT:
		return io_accept_prep(req, sqe);
	case IORING_OP_FALLOCATE:
		return io_fallocate_prep(req, sqe);
	case IORING_OP_OPENAT:
		return io_openat_prep(req, sqe);
	case IORING_OP_CLOSE:
		return io_close_prep(req, sqe);
	case IORING_OP_FILES_UPDATE:
		return io_rsrc_update_prep(req, sqe);
	case IORING_OP_STATX:
		return io_statx_prep(req, sqe);
	case IORING_OP_FADVISE:
		return io_fadvise_prep(req, sqe);
	case IORING_OP_MADVISE:
		return io_madvise_prep(req, sqe);
	case IORING_OP_OPENAT2:
		return io_openat2_prep(req, sqe);
	case IORING_OP_EPOLL_CTL:
		return io_epoll_ctl_prep(req, sqe);
	case IORING_OP_SPLICE:
		return io_splice_prep(req, sqe);
	case IORING_OP_PROVIDE_BUFFERS:
		return io_provide_buffers_prep(req, sqe);
	case IORING_OP_REMOVE_BUFFERS:
		return io_remove_buffers_prep(req, sqe);
	case IORING_OP_TEE:
		return io_tee_prep(req, sqe);
	case IORING_OP_SHUTDOWN:
		return io_shutdown_prep(req, sqe);
	case IORING_OP_RENAMEAT:
		return io_renameat_prep(req, sqe);
	case IORING_OP_UNLINKAT:
		return io_unlinkat_prep(req, sqe);
	}

	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
			req->opcode);
	return -EINVAL;
}

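/*
 * Allocate and fill ->async_data for opcodes that need extra setup before
 * they can be retried out of line, so the request no longer depends on the
 * original SQE contents.
 */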
static int io_req_prep_async(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].needs_async_setup)
		return 0;
	if (WARN_ON_ONCE(req->async_data))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -EAGAIN;

	switch (req->opcode) {
	case IORING_OP_READV:
		return io_rw_prep_async(req, READ);
	case IORING_OP_WRITEV:
		return io_rw_prep_async(req, WRITE);
	case IORING_OP_SENDMSG:
		return io_sendmsg_prep_async(req);
	case IORING_OP_RECVMSG:
		return io_recvmsg_prep_async(req);
	case IORING_OP_CONNECT:
		return io_connect_prep_async(req);
	}
	printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
		    req->opcode);
	return -EFAULT;
}

static u32 io_get_sequence(struct io_kiocb *req)
{
	u32 seq = req->ctx->cached_sq_head;

	/* need original cached_sq_head, but it was increased for each req */
	io_for_each_link(req, req)
		seq--;
	return seq;
}

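/*
 * Handle IOSQE_IO_DRAIN: returns true if the request was consumed here
 * (deferred, punted to io-wq, or failed), false if the caller should
 * issue it normally.
 */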
static bool io_drain_req(struct io_kiocb *req)
{
	struct io_kiocb *pos;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_defer_entry *de;
	int ret;
	u32 seq;

	/*
	 * If we need to drain a request in the middle of a link, drain the
	 * head request and the next request/link after the current link.
	 * Considering sequential execution of links, IOSQE_IO_DRAIN will be
	 * maintained for every request of our link.
	 */
	if (ctx->drain_next) {
		req->flags |= REQ_F_IO_DRAIN;
		ctx->drain_next = false;
	}
	/* not interested in head, start from the first linked */
	io_for_each_link(pos, req->link) {
		if (pos->flags & REQ_F_IO_DRAIN) {
			ctx->drain_next = true;
			req->flags |= REQ_F_IO_DRAIN;
			break;
		}
	}

	/* Still need defer if there is pending req in defer list. */
	if (likely(list_empty_careful(&ctx->defer_list) &&
		!(req->flags & REQ_F_IO_DRAIN))) {
		ctx->drain_active = false;
		return false;
	}

	seq = io_get_sequence(req);
	/* Still a chance to pass the sequence check */
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
		return false;

	ret = io_req_prep_async(req);
	if (ret)
		goto fail;
	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
fail:
		io_req_complete_failed(req, ret);
		return true;
	}

	spin_lock_irq(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		kfree(de);
		io_queue_async_work(req);
		return true;
	}

	trace_io_uring_defer(ctx, req, req->user_data);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return true;
}

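/*
 * Release per-opcode resources (selected buffers, copied iovecs/msghdrs,
 * held files and names) once a request is done with them.
 */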
static void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		switch (req->opcode) {
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_READ:
			kfree((void *)(unsigned long)req->rw.addr);
			break;
		case IORING_OP_RECVMSG:
		case IORING_OP_RECV:
			kfree(req->sr_msg.kbuf);
			break;
		}
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		switch (req->opcode) {
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_READ:
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
		case IORING_OP_WRITE: {
			struct io_async_rw *io = req->async_data;

			kfree(io->free_iovec);
			break;
			}
		case IORING_OP_RECVMSG:
		case IORING_OP_SENDMSG: {
			struct io_async_msghdr *io = req->async_data;

			kfree(io->free_iov);
			break;
			}
		case IORING_OP_SPLICE:
		case IORING_OP_TEE:
			if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
				io_put_file(req->splice.file_in);
			break;
		case IORING_OP_OPENAT:
		case IORING_OP_OPENAT2:
			if (req->open.filename)
				putname(req->open.filename);
			break;
		case IORING_OP_RENAMEAT:
			putname(req->rename.oldpath);
			putname(req->rename.newpath);
			break;
		case IORING_OP_UNLINKAT:
			putname(req->unlink.filename);
			break;
		}
	}
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);

	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}

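/*
 * Core issue path: dispatch the request to its opcode handler, applying
 * any per-request personality credentials around the call.
 */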
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	const struct cred *creds = NULL;
	int ret;

	if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
		creds = override_creds(req->creds);

	switch (req->opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req, issue_flags);
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		ret = io_read(req, issue_flags);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		ret = io_write(req, issue_flags);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, issue_flags);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add(req, issue_flags);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_update(req, issue_flags);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_sync_file_range(req, issue_flags);
		break;
	case IORING_OP_SENDMSG:
		ret = io_sendmsg(req, issue_flags);
		break;
	case IORING_OP_SEND:
		ret = io_send(req, issue_flags);
		break;
	case IORING_OP_RECVMSG:
		ret = io_recvmsg(req, issue_flags);
		break;
	case IORING_OP_RECV:
		ret = io_recv(req, issue_flags);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout(req, issue_flags);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove(req, issue_flags);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept(req, issue_flags);
		break;
	case IORING_OP_CONNECT:
		ret = io_connect(req, issue_flags);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel(req, issue_flags);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate(req, issue_flags);
		break;
	case IORING_OP_OPENAT:
		ret = io_openat(req, issue_flags);
		break;
	case IORING_OP_CLOSE:
		ret = io_close(req, issue_flags);
		break;
	case IORING_OP_FILES_UPDATE:
		ret = io_files_update(req, issue_flags);
		break;
	case IORING_OP_STATX:
		ret = io_statx(req, issue_flags);
		break;
	case IORING_OP_FADVISE:
		ret = io_fadvise(req, issue_flags);
		break;
	case IORING_OP_MADVISE:
		ret = io_madvise(req, issue_flags);
		break;
	case IORING_OP_OPENAT2:
		ret = io_openat2(req, issue_flags);
		break;
	case IORING_OP_EPOLL_CTL:
		ret = io_epoll_ctl(req, issue_flags);
		break;
	case IORING_OP_SPLICE:
		ret = io_splice(req, issue_flags);
		break;
	case IORING_OP_PROVIDE_BUFFERS:
		ret = io_provide_buffers(req, issue_flags);
		break;
	case IORING_OP_REMOVE_BUFFERS:
		ret = io_remove_buffers(req, issue_flags);
		break;
	case IORING_OP_TEE:
		ret = io_tee(req, issue_flags);
		break;
	case IORING_OP_SHUTDOWN:
		ret = io_shutdown(req, issue_flags);
		break;
	case IORING_OP_RENAMEAT:
		ret = io_renameat(req, issue_flags);
		break;
	case IORING_OP_UNLINKAT:
		ret = io_unlinkat(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (creds)
		revert_creds(creds);
	if (ret)
		return ret;
	/* If the op doesn't have a file, we're not polling for it */
	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
		io_iopoll_req_issued(req);

	return 0;
}

static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	req = io_put_req_find_next(req);
	return req ? &req->work : NULL;
}

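/*
 * io-wq worker entry point: issue the request in blocking context,
 * retrying on -EAGAIN since polled IO can't wait for request slots on
 * the block side.
 */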
static void io_wq_submit_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_kiocb *timeout;
	int ret = 0;

	timeout = io_prep_linked_timeout(req);
	if (timeout)
		io_queue_linked_timeout(timeout);

	if (work->flags & IO_WQ_WORK_CANCEL)
		ret = -ECANCELED;

	if (!ret) {
		do {
			ret = io_issue_sqe(req, 0);
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN)
				break;
			cond_resched();
		} while (1);
	}

	/* avoid locking problems by failing it from a clean context */
	if (ret) {
		/* io-wq is going to take one down */
		req_ref_get(req);
		io_req_task_queue_fail(req, ret);
	}
}

static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
						       unsigned i)
{
	return &table->files[i];
}

static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
	struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);

	return (struct file *) (slot->file_ptr & FFS_MASK);
}

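/*
 * Fixed file slots stash FFS_* flag bits in the low bits of the file
 * pointer, so io_file_get_fixed() can recover "supports NOWAIT" and
 * "is a regular file" state without touching the struct file again.
 */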
static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
{
	unsigned long file_ptr = (unsigned long) file;

	if (__io_file_supports_nowait(file, READ))
		file_ptr |= FFS_ASYNC_READ;
	if (__io_file_supports_nowait(file, WRITE))
		file_ptr |= FFS_ASYNC_WRITE;
	if (S_ISREG(file_inode(file)->i_mode))
		file_ptr |= FFS_ISREG;
	file_slot->file_ptr = file_ptr;
}

static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
					     struct io_kiocb *req, int fd)
{
	struct file *file;
	unsigned long file_ptr;

	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
		return NULL;
	fd = array_index_nospec(fd, ctx->nr_user_files);
	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
	file = (struct file *) (file_ptr & FFS_MASK);
	file_ptr &= ~FFS_MASK;
	/* mask in overlapping REQ_F and FFS bits */
	req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT);
	io_req_set_rsrc_node(req);
	return file;
}

static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
				       struct io_submit_state *state,
				       struct io_kiocb *req, int fd)
{
	struct file *file = __io_file_get(state, fd);

	trace_io_uring_file_get(ctx, fd);

	/* we don't allow fixed io_uring files */
	if (file && unlikely(file->f_op == &io_uring_fops))
		io_req_track_inflight(req);
	return file;
}

static inline struct file *io_file_get(struct io_ring_ctx *ctx,
				       struct io_submit_state *state,
				       struct io_kiocb *req, int fd, bool fixed)
{
	if (fixed)
		return io_file_get_fixed(ctx, req, fd);
	else
		return io_file_get_normal(ctx, state, req, fd);
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	prev = req->timeout.head;
	req->timeout.head = NULL;

	/*
	 * We don't expect the list to be empty; that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (prev) {
		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
		io_put_req_deferred(prev, 1);
		io_put_req_deferred(req, 1);
	} else {
		io_req_complete_post(req, -ETIME, 0);
	}
	return HRTIMER_NORESTART;
}

static void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer.
	 */
	if (req->timeout.head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
	}
	spin_unlock_irq(&ctx->completion_lock);
	/* drop submission reference */
	io_put_req(req);
}

static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
	    nxt->opcode != IORING_OP_LINK_TIMEOUT)
		return NULL;

	nxt->timeout.head = req;
	nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
	req->flags |= REQ_F_LINK_TIMEOUT;
	return nxt;
}

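/*
 * Issue a request from the submission path: on success completions are
 * batched in the submit state, -EAGAIN tries arming poll before punting
 * to io-wq, and any other error fails the request inline.
 */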
static void __io_queue_sqe(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
	int ret;

issue_sqe:
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);

	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
	if (likely(!ret)) {
		/* drop submission reference */
		if (req->flags & REQ_F_COMPLETE_INLINE) {
			struct io_ring_ctx *ctx = req->ctx;
			struct io_comp_state *cs = &ctx->submit_state.comp;

			cs->reqs[cs->nr++] = req;
			if (cs->nr == ARRAY_SIZE(cs->reqs))
				io_submit_flush_completions(ctx);
		} else {
			io_put_req(req);
		}
	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
		switch (io_arm_poll_handler(req)) {
		case IO_APOLL_READY:
			goto issue_sqe;
		case IO_APOLL_ABORTED:
			/*
			 * Queued up for async execution, worker will release
			 * submit reference when the iocb is actually submitted.
			 */
			io_queue_async_work(req);
			break;
		}
	} else {
		io_req_complete_failed(req, ret);
	}
	if (linked_timeout)
		io_queue_linked_timeout(linked_timeout);
}

static inline void io_queue_sqe(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	if (unlikely(req->ctx->drain_active) && io_drain_req(req))
		return;

	if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
		__io_queue_sqe(req);
	} else {
		int ret = io_req_prep_async(req);

		if (unlikely(ret))
			io_req_complete_failed(req, ret);
		else
			io_queue_async_work(req);
	}
}

/*
 * Check SQE restrictions (opcode and flags).
 *
 * Returns 'true' if SQE is allowed, 'false' otherwise.
 */
static inline bool io_check_restriction(struct io_ring_ctx *ctx,
					struct io_kiocb *req,
					unsigned int sqe_flags)
{
	if (likely(!ctx->restricted))
		return true;

	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
		return false;

	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
	    ctx->restrictions.sqe_flags_required)
		return false;

	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
			  ctx->restrictions.sqe_flags_required))
		return false;

	return true;
}

static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state;
	unsigned int sqe_flags;
	int personality, ret = 0;

	/* req is partially pre-initialised, see io_preinit_req() */
	req->opcode = READ_ONCE(sqe->opcode);
	/* same numerical values with corresponding REQ_F_*, safe to copy */
	req->flags = sqe_flags = READ_ONCE(sqe->flags);
	req->user_data = READ_ONCE(sqe->user_data);
	req->file = NULL;
	req->fixed_rsrc_refs = NULL;
	/* one is dropped after submission, the other at completion */
	atomic_set(&req->refs, 2);
	req->task = current;

	/* enforce forwards compatibility on users */
	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
		return -EINVAL;
	if (unlikely(req->opcode >= IORING_OP_LAST))
		return -EINVAL;
	if (!io_check_restriction(ctx, req, sqe_flags))
		return -EACCES;

	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
	    !io_op_defs[req->opcode].buffer_select)
		return -EOPNOTSUPP;
	if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
		ctx->drain_active = true;

	personality = READ_ONCE(sqe->personality);
	if (personality) {
		req->creds = xa_load(&ctx->personalities, personality);
		if (!req->creds)
			return -EINVAL;
		get_cred(req->creds);
		req->flags |= REQ_F_CREDS;
	}
	state = &ctx->submit_state;

	/*
	 * Plug now if we have more than 1 IO left after this, and the target
	 * is potentially a read/write to block based storage.
	 */
	if (!state->plug_started && state->ios_left > 1 &&
	    io_op_defs[req->opcode].plug) {
		blk_start_plug(&state->plug);
		state->plug_started = true;
	}

	if (io_op_defs[req->opcode].needs_file) {
		req->file = io_file_get(ctx, state, req, READ_ONCE(sqe->fd),
					(sqe_flags & IOSQE_FIXED_FILE));
		if (unlikely(!req->file))
			ret = -EBADF;
	}

	state->ios_left--;
	return ret;
}

static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			 const struct io_uring_sqe *sqe)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_link *link = &ctx->submit_state.link;
	int ret;

	ret = io_init_req(ctx, req, sqe);
	if (unlikely(ret)) {
fail_req:
		if (link->head) {
			/* fail even hard links since we don't submit */
			req_set_fail(link->head);
			io_req_complete_failed(link->head, -ECANCELED);
			link->head = NULL;
		}
		io_req_complete_failed(req, ret);
		return ret;
	}

	ret = io_req_prep(req, sqe);
	if (unlikely(ret))
		goto fail_req;

	/* don't need @sqe from now on */
	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
				  req->flags, true,
				  ctx->flags & IORING_SETUP_SQPOLL);

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (link->head) {
		struct io_kiocb *head = link->head;

		ret = io_req_prep_async(req);
		if (unlikely(ret))
			goto fail_req;
		trace_io_uring_link(ctx, req, head);
		link->last->link = req;
		link->last = req;

		/* last request of a link, enqueue the link */
		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			link->head = NULL;
			io_queue_sqe(head);
		}
	} else {
		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			link->head = req;
			link->last = req;
		} else {
			io_queue_sqe(req);
		}
	}

	return 0;
}

/*
 * Batched submission is done; ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state,
				struct io_ring_ctx *ctx)
{
	if (state->link.head)
		io_queue_sqe(state->link.head);
	if (state->comp.nr)
		io_submit_flush_completions(ctx);
	if (state->plug_started)
		blk_finish_plug(&state->plug);
	io_state_file_put(state);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  unsigned int max_ios)
{
	state->plug_started = false;
	state->ios_left = max_ios;
	/* set only head, no need to init link_last in advance */
	state->link.head = NULL;
}

static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}

Jens Axboe9e645e112019-05-10 16:07:28 -06006729/*
Fam Zhengdd9ae8a2021-06-04 17:42:56 +01006730 * Fetch an sqe, if one is available. Note this returns a pointer to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006731 * that is mapped by userspace. This means that care needs to be taken to
6732 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006733 * being a good citizen. If members of the sqe are validated and then later
6734 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006735 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006736 */
6737static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006738{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01006739 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006740 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06006741
6742 /*
6743 * The cached sq head (or cq tail) serves two purposes:
6744 *
6745 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006746	 * head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006747 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006748 * though the application is the one updating it.
6749 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006750 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006751 if (likely(head < ctx->sq_entries))
6752 return &ctx->sq_sqes[head];
6753
6754 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01006755 ctx->cq_extra--;
6756 WRITE_ONCE(ctx->rings->sq_dropped,
6757 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03006758 return NULL;
6759}
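
io_get_sqe() trusts nothing coming from the shared ring: the index loaded from sq_array
is range-checked, and out-of-range entries are dropped and accounted in sq_dropped. The
producer side of that contract, sketched for a raw userspace submitter and assuming
liburing's struct io_uring_sq field names (ktail, kring_mask, array, sqes); ring setup
and the rest of the submission path are omitted:

	#include <liburing.h>
	#include <string.h>

	static void push_nop(struct io_uring_sq *sq)
	{
		unsigned tail = *sq->ktail;			/* only the submitter writes the tail */
		unsigned index = tail & *sq->kring_mask;	/* same masking as io_get_sqe() */
		struct io_uring_sqe *sqe = &sq->sqes[index];

		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_NOP;

		sq->array[index] = index;	/* the value io_get_sqe() READ_ONCE()s and range-checks */
		/* release store: the SQE contents must be visible before the new tail */
		__atomic_store_n(sq->ktail, tail + 1, __ATOMIC_RELEASE);
	}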
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006760
Jens Axboe0f212202020-09-13 13:09:39 -06006761static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Pavel Begunkov282cdc82021-08-09 13:04:10 +01006762 __must_hold(&ctx->uring_lock)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006763{
Pavel Begunkov09899b12021-06-14 02:36:22 +01006764 struct io_uring_task *tctx;
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006765 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006766
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006767 /* make sure SQ entry isn't read before tail */
6768 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006769 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6770 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006771
Pavel Begunkov09899b12021-06-14 02:36:22 +01006772 tctx = current->io_uring;
6773 tctx->cached_refs -= nr;
6774 if (unlikely(tctx->cached_refs < 0)) {
6775 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
6776
6777 percpu_counter_add(&tctx->inflight, refill);
6778 refcount_add(refill, &current->usage);
6779 tctx->cached_refs += refill;
6780 }
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006781 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006782
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006783 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006784 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006785 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006786
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006787 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006788 if (unlikely(!req)) {
6789 if (!submitted)
6790 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006791 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006792 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006793 sqe = io_get_sqe(ctx);
6794 if (unlikely(!sqe)) {
6795 kmem_cache_free(req_cachep, req);
6796 break;
6797 }
Jens Axboed3656342019-12-18 09:50:26 -07006798 /* will complete beyond this point, count as submitted */
6799 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006800 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006801 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006802 }
6803
Pavel Begunkov9466f432020-01-25 22:34:01 +03006804 if (unlikely(submitted != nr)) {
6805 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006806 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006807
Pavel Begunkov09899b12021-06-14 02:36:22 +01006808 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06006809 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006810 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006811
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006812 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006813 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6814 io_commit_sqring(ctx);
6815
Jens Axboe6c271ce2019-01-10 11:22:30 -07006816 return submitted;
6817}
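
The tctx->cached_refs handling above only touches the expensive percpu counter and task
refcount when the per-task cache underflows. A worked sketch of the refill arithmetic,
assuming IO_TCTX_REFS_CACHE_NR is 1024 (the actual constant is defined elsewhere in this
file and is only assumed here):

	#define TCTX_REFS_CACHE_NR_ASSUMED	1024	/* assumed value of IO_TCTX_REFS_CACHE_NR */

	static int refill_cached_refs(int cached_refs, unsigned int nr)
	{
		cached_refs -= nr;			/* e.g. 5 cached - 8 needed = -3 */
		if (cached_refs < 0) {
			unsigned int refill = -cached_refs + TCTX_REFS_CACHE_NR_ASSUMED;
							/* 3 + 1024 = 1027 refs charged in one go */
			cached_refs += refill;		/* cache ends up back at 1024 */
		}
		return cached_refs;
	}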
6818
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006819static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
6820{
6821 return READ_ONCE(sqd->state);
6822}
6823
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006824static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6825{
6826 /* Tell userspace we may need a wakeup call */
6827 spin_lock_irq(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07006828 WRITE_ONCE(ctx->rings->sq_flags,
6829 ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006830 spin_unlock_irq(&ctx->completion_lock);
6831}
6832
6833static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6834{
6835 spin_lock_irq(&ctx->completion_lock);
Nadav Amit20c0b382021-08-07 17:13:42 -07006836 WRITE_ONCE(ctx->rings->sq_flags,
6837 ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006838 spin_unlock_irq(&ctx->completion_lock);
6839}
6840
Xiaoguang Wang08369242020-11-03 14:15:59 +08006841static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006842{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006843 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006844 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006845
Jens Axboec8d1ba52020-09-14 11:07:26 -06006846 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006847 /* if we're handling multiple rings, cap submit size for fairness */
Olivier Langlois4ce8ad92021-06-23 11:50:18 -07006848 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
6849 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
Jens Axboee95eee22020-09-08 09:11:32 -06006850
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006851 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6852 unsigned nr_events = 0;
Pavel Begunkov948e1942021-06-24 15:09:55 +01006853 const struct cred *creds = NULL;
6854
6855 if (ctx->sq_creds != current_cred())
6856 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006857
Xiaoguang Wang08369242020-11-03 14:15:59 +08006858 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006859 if (!list_empty(&ctx->iopoll_list))
Jens Axboe3c30ef02021-07-23 11:49:29 -06006860 io_do_iopoll(ctx, &nr_events, 0, true);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006861
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01006862 /*
6863	 * Don't submit if refs are dying: that benefits io_uring_register(),
6864	 * and io_ring_exit_work() relies on it as well.
6865 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006866 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6867 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006868 ret = io_submit_sqes(ctx, to_submit);
6869 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06006870
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006871 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
6872 wake_up(&ctx->sqo_sq_wait);
Pavel Begunkov948e1942021-06-24 15:09:55 +01006873 if (creds)
6874 revert_creds(creds);
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006875 }
Jens Axboe90554202020-09-03 12:12:41 -06006876
Xiaoguang Wang08369242020-11-03 14:15:59 +08006877 return ret;
6878}
6879
6880static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6881{
6882 struct io_ring_ctx *ctx;
6883 unsigned sq_thread_idle = 0;
6884
Pavel Begunkovc9dca272021-03-10 13:13:55 +00006885 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6886 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006887 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006888}
6889
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006890static bool io_sqd_handle_event(struct io_sq_data *sqd)
6891{
6892 bool did_sig = false;
6893 struct ksignal ksig;
6894
6895 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6896 signal_pending(current)) {
6897 mutex_unlock(&sqd->lock);
6898 if (signal_pending(current))
6899 did_sig = get_signal(&ksig);
6900 cond_resched();
6901 mutex_lock(&sqd->lock);
6902 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006903 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6904}
6905
Jens Axboe6c271ce2019-01-10 11:22:30 -07006906static int io_sq_thread(void *data)
6907{
Jens Axboe69fb2132020-09-14 11:16:23 -06006908 struct io_sq_data *sqd = data;
6909 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006910 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006911 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006912 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006913
Pavel Begunkov696ee882021-04-01 09:55:04 +01006914 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006915 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06006916
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006917 if (sqd->sq_cpu != -1)
6918 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6919 else
6920 set_cpus_allowed_ptr(current, cpu_online_mask);
6921 current->flags |= PF_NO_SETAFFINITY;
6922
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006923 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006924 while (1) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01006925 bool cap_entries, sqt_spin = false;
Jens Axboec1edbf52019-11-10 16:56:04 -07006926
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006927 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
6928 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01006929 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006930 timeout = jiffies + sqd->sq_thread_idle;
6931 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006932
Jens Axboee95eee22020-09-08 09:11:32 -06006933 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006934 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkov948e1942021-06-24 15:09:55 +01006935 int ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006936
Xiaoguang Wang08369242020-11-03 14:15:59 +08006937 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6938 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006939 }
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01006940 if (io_run_task_work())
6941 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006942
Xiaoguang Wang08369242020-11-03 14:15:59 +08006943 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006944 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006945 if (sqt_spin)
6946 timeout = jiffies + sqd->sq_thread_idle;
6947 continue;
6948 }
6949
Xiaoguang Wang08369242020-11-03 14:15:59 +08006950 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01006951 if (!io_sqd_events_pending(sqd) && !current->task_works) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01006952 bool needs_sched = true;
6953
Hao Xu724cb4f2021-04-21 23:19:11 +08006954 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01006955 io_ring_set_wakeup_flag(ctx);
6956
Hao Xu724cb4f2021-04-21 23:19:11 +08006957 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6958 !list_empty_careful(&ctx->iopoll_list)) {
6959 needs_sched = false;
6960 break;
6961 }
6962 if (io_sqring_entries(ctx)) {
6963 needs_sched = false;
6964 break;
6965 }
6966 }
6967
6968 if (needs_sched) {
6969 mutex_unlock(&sqd->lock);
6970 schedule();
6971 mutex_lock(&sqd->lock);
6972 }
Jens Axboe69fb2132020-09-14 11:16:23 -06006973 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6974 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006975 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006976
6977 finish_wait(&sqd->wait, &wait);
6978 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006979 }
6980
Pavel Begunkov78cc6872021-06-14 02:36:23 +01006981 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006982 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006983 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006984 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006985 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01006986 mutex_unlock(&sqd->lock);
6987
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006988 complete(&sqd->exited);
6989 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006990}
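
This thread only exists for rings created with IORING_SETUP_SQPOLL, and the idle window
used above comes straight from sq_thread_idle. A userspace setup sketch, assuming
liburing's io_uring_queue_init_params() helper (illustrative only; adequate privileges
and kernel support are assumed):

	#include <liburing.h>

	static int setup_sqpoll_ring(struct io_uring *ring)
	{
		struct io_uring_params p = { 0 };

		p.flags = IORING_SETUP_SQPOLL;
		p.sq_thread_idle = 2000;	/* ms of inactivity before the thread idles */

		return io_uring_queue_init_params(8, ring, &p);
	}

liburing's submit path then checks IORING_SQ_NEED_WAKEUP and issues io_uring_enter() with
IORING_ENTER_SQ_WAKEUP when the poller has gone to sleep, which is the flag set and
cleared by io_ring_set_wakeup_flag() / io_ring_clear_wakeup_flag() above.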
6991
Jens Axboebda52162019-09-24 13:47:15 -06006992struct io_wait_queue {
6993 struct wait_queue_entry wq;
6994 struct io_ring_ctx *ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06006995 unsigned cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06006996 unsigned nr_timeouts;
6997};
6998
Pavel Begunkov6c503152021-01-04 20:36:36 +00006999static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06007000{
7001 struct io_ring_ctx *ctx = iowq->ctx;
Jens Axboe5fd46172021-08-06 14:04:31 -06007002 int dist = ctx->cached_cq_tail - (int) iowq->cq_tail;
Jens Axboebda52162019-09-24 13:47:15 -06007003
7004 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08007005 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06007006 * started waiting. For timeouts, we always want to return to userspace,
7007 * regardless of event count.
7008 */
Jens Axboe5fd46172021-08-06 14:04:31 -06007009 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
Jens Axboebda52162019-09-24 13:47:15 -06007010}
7011
7012static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
7013 int wake_flags, void *key)
7014{
7015 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
7016 wq);
7017
Pavel Begunkov6c503152021-01-04 20:36:36 +00007018 /*
7019 * Cannot safely flush overflowed CQEs from here, ensure we wake up
7020 * the task, and the next invocation will do it.
7021 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007022 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
Pavel Begunkov6c503152021-01-04 20:36:36 +00007023 return autoremove_wake_function(curr, mode, wake_flags, key);
7024 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06007025}
7026
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007027static int io_run_task_work_sig(void)
7028{
7029 if (io_run_task_work())
7030 return 1;
7031 if (!signal_pending(current))
7032 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06007033 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06007034 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06007035 return -EINTR;
7036}
7037
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007038/* when this returns >0, the caller should retry */
7039static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
7040 struct io_wait_queue *iowq,
7041 signed long *timeout)
7042{
7043 int ret;
7044
7045 /* make sure we run task_work before checking for signals */
7046 ret = io_run_task_work_sig();
7047 if (ret || io_should_wake(iowq))
7048 return ret;
7049 /* let the caller flush overflows, retry */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01007050 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007051 return 1;
7052
7053 *timeout = schedule_timeout(*timeout);
7054 return !*timeout ? -ETIME : 1;
7055}
7056
Jens Axboe2b188cc2019-01-07 10:46:33 -07007057/*
7058 * Wait until events become available, if we don't already have some. The
7059 * application must reap them itself, as they reside on the shared cq ring.
7060 */
7061static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08007062 const sigset_t __user *sig, size_t sigsz,
7063 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007064{
Pavel Begunkov902910992021-08-09 09:07:32 -06007065 struct io_wait_queue iowq;
Hristo Venev75b28af2019-08-26 17:23:46 +00007066 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007067 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7068 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007069
Jens Axboeb41e9852020-02-17 09:52:41 -07007070 do {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00007071 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007072 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007073 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007074 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007075 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007076 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007077
7078 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007079#ifdef CONFIG_COMPAT
7080 if (in_compat_syscall())
7081 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007082 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007083 else
7084#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007085 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007086
Jens Axboe2b188cc2019-01-07 10:46:33 -07007087 if (ret)
7088 return ret;
7089 }
7090
Hao Xuc73ebb62020-11-03 10:54:37 +08007091 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007092 struct timespec64 ts;
7093
Hao Xuc73ebb62020-11-03 10:54:37 +08007094 if (get_timespec64(&ts, uts))
7095 return -EFAULT;
7096 timeout = timespec64_to_jiffies(&ts);
7097 }
7098
Pavel Begunkov902910992021-08-09 09:07:32 -06007099 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
7100 iowq.wq.private = current;
7101 INIT_LIST_HEAD(&iowq.wq.entry);
7102 iowq.ctx = ctx;
Jens Axboebda52162019-09-24 13:47:15 -06007103 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Jens Axboe5fd46172021-08-06 14:04:31 -06007104 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
Pavel Begunkov902910992021-08-09 09:07:32 -06007105
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007106 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007107 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007108 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00007109 if (!io_cqring_overflow_flush(ctx, false)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007110 ret = -EBUSY;
7111 break;
7112 }
Pavel Begunkov311997b2021-06-14 23:37:28 +01007113 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
Jens Axboebda52162019-09-24 13:47:15 -06007114 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007115 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
Pavel Begunkov311997b2021-06-14 23:37:28 +01007116 finish_wait(&ctx->cq_wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007117 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007118 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007119
Jens Axboeb7db41c2020-07-04 08:55:50 -06007120 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007121
Hristo Venev75b28af2019-08-26 17:23:46 +00007122 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007123}
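
io_cqring_wait() backs io_uring_enter() with IORING_ENTER_GETEVENTS: a minimum completion
count, an optional signal mask and an optional timeout. The same wait-and-reap pattern
from userspace, assuming liburing's io_uring_wait_cqe_timeout() helper (illustrative
only):

	#include <liburing.h>

	static int reap_one(struct io_uring *ring)
	{
		struct io_uring_cqe *cqe;
		struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
		int ret = io_uring_wait_cqe_timeout(ring, &cqe, &ts);

		if (ret)
			return ret;		/* e.g. -ETIME, mirroring io_cqring_wait_schedule() */

		ret = cqe->res;			/* per-request result */
		io_uring_cqe_seen(ring, cqe);	/* advance the CQ head for the kernel */
		return ret;
	}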
7124
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007125static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007126{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007127 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007128
7129 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007130 kfree(table[i]);
7131 kfree(table);
7132}
7133
7134static void **io_alloc_page_table(size_t size)
7135{
7136 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7137 size_t init_size = size;
7138 void **table;
7139
7140 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
7141 if (!table)
7142 return NULL;
7143
7144 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov27f6b312021-06-15 13:20:13 +01007145 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007146
7147 table[i] = kzalloc(this_size, GFP_KERNEL);
7148 if (!table[i]) {
7149 io_free_page_table(table, init_size);
7150 return NULL;
7151 }
7152 size -= this_size;
7153 }
7154 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007155}
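
io_alloc_page_table() splits one logical table into page-sized chunks so no single
allocation exceeds a page. With 4 KiB pages, a 10000-byte table becomes
DIV_ROUND_UP(10000, 4096) = 3 chunks of 4096, 4096 and 1808 bytes; a small sketch of the
same split, assuming a 4 KiB page size:

	#include <stddef.h>

	#define PAGE_SZ_ASSUMED	4096u

	static size_t chunk_size(size_t total, unsigned int chunk_idx)
	{
		size_t off = (size_t)chunk_idx * PAGE_SZ_ASSUMED;

		if (off >= total)
			return 0;
		/* every chunk is a full page except a possibly shorter final one */
		return total - off < PAGE_SZ_ASSUMED ? total - off : PAGE_SZ_ASSUMED;
	}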
7156
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007157static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7158{
7159 percpu_ref_exit(&ref_node->refs);
7160 kfree(ref_node);
7161}
7162
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007163static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7164 struct io_rsrc_data *data_to_kill)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007165{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007166 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7167 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007168
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007169 if (data_to_kill) {
7170 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007171
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007172 rsrc_node->rsrc_data = data_to_kill;
Jens Axboe4956b9e2021-08-09 07:49:41 -06007173 spin_lock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007174 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
Jens Axboe4956b9e2021-08-09 07:49:41 -06007175 spin_unlock_irq(&ctx->rsrc_ref_lock);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007176
Pavel Begunkov3e942492021-04-11 01:46:34 +01007177 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007178 percpu_ref_kill(&rsrc_node->refs);
7179 ctx->rsrc_node = NULL;
7180 }
7181
7182 if (!ctx->rsrc_node) {
7183 ctx->rsrc_node = ctx->rsrc_backup_node;
7184 ctx->rsrc_backup_node = NULL;
7185 }
Pavel Begunkov1642b442020-12-30 21:34:14 +00007186}
7187
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007188static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007189{
7190 if (ctx->rsrc_backup_node)
7191 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007192 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007193 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7194}
7195
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007196static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007197{
7198 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007199
Pavel Begunkov215c3902021-04-01 15:43:48 +01007200	/* As we may drop ->uring_lock, another task may have started a quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007201 if (data->quiesce)
7202 return -ENXIO;
7203
7204 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007205 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007206 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007207 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007208 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007209 io_rsrc_node_switch(ctx, data);
7210
Pavel Begunkov3e942492021-04-11 01:46:34 +01007211 /* kill initial ref, already quiesced if zero */
7212 if (atomic_dec_and_test(&data->refs))
7213 break;
Jens Axboec018db42021-08-09 08:15:50 -06007214 mutex_unlock(&ctx->uring_lock);
Hao Xu8bad28d2021-02-19 17:19:36 +08007215 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007216 ret = wait_for_completion_interruptible(&data->done);
Jens Axboec018db42021-08-09 08:15:50 -06007217 if (!ret) {
7218 mutex_lock(&ctx->uring_lock);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007219 break;
Jens Axboec018db42021-08-09 08:15:50 -06007220 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007221
Pavel Begunkov3e942492021-04-11 01:46:34 +01007222 atomic_inc(&data->refs);
7223 /* wait for all works potentially completing data->done */
7224 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007225 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007226
Hao Xu8bad28d2021-02-19 17:19:36 +08007227 ret = io_run_task_work_sig();
7228 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007229 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007230 data->quiesce = false;
7231
Hao Xu8bad28d2021-02-19 17:19:36 +08007232 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007233}
7234
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007235static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7236{
7237 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7238 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7239
7240 return &data->tags[table_idx][off];
7241}
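
Since io_rsrc_data_alloc() below builds data->tags with io_alloc_page_table() over u64
entries, each chunk holds PAGE_SIZE / sizeof(u64) tags; assuming 4 KiB pages that is 512,
so the shift/mask pair above work out to idx >> 9 and idx & 511 (the real
IO_RSRC_TAG_TABLE_* constants are defined earlier in the file and only assumed here). A
worked example:

	/* locating the tag slot for idx = 1300, assuming 512 tags per chunk */
	unsigned int idx = 1300;
	unsigned int table_idx = idx >> 9;	/* 1300 / 512 = 2   -> third chunk */
	unsigned int off = idx & 511;		/* 1300 % 512 = 276 -> slot within it */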
7242
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007243static void io_rsrc_data_free(struct io_rsrc_data *data)
7244{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007245 size_t size = data->nr * sizeof(data->tags[0][0]);
7246
7247 if (data->tags)
7248 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007249 kfree(data);
7250}
7251
Pavel Begunkovd878c812021-06-14 02:36:18 +01007252static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7253 u64 __user *utags, unsigned nr,
7254 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007255{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007256 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007257 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007258 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007259
7260 data = kzalloc(sizeof(*data), GFP_KERNEL);
7261 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007262 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007263 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007264 if (!data->tags) {
7265 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007266 return -ENOMEM;
7267 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007268
7269 data->nr = nr;
7270 data->ctx = ctx;
7271 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007272 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007273 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007274 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01007275 u64 *tag_slot = io_get_tag_slot(data, i);
7276
7277 if (copy_from_user(tag_slot, &utags[i],
7278 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007279 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007280 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007281 }
7282
Pavel Begunkov3e942492021-04-11 01:46:34 +01007283 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007284 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007285 *pdata = data;
7286 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007287fail:
7288 io_rsrc_data_free(data);
7289 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007290}
7291
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007292static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7293{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007294 table->files = kvcalloc(nr_files, sizeof(table->files[0]), GFP_KERNEL);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007295 return !!table->files;
7296}
7297
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007298static void io_free_file_tables(struct io_file_table *table)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007299{
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007300 kvfree(table->files);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007301 table->files = NULL;
7302}
7303
Jens Axboe2b188cc2019-01-07 10:46:33 -07007304static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7305{
7306#if defined(CONFIG_UNIX)
7307 if (ctx->ring_sock) {
7308 struct sock *sock = ctx->ring_sock->sk;
7309 struct sk_buff *skb;
7310
7311 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7312 kfree_skb(skb);
7313 }
7314#else
7315 int i;
7316
7317 for (i = 0; i < ctx->nr_user_files; i++) {
7318 struct file *file;
7319
7320 file = io_file_from_index(ctx, i);
7321 if (file)
7322 fput(file);
7323 }
7324#endif
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007325 io_free_file_tables(&ctx->file_table);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007326 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007327 ctx->file_data = NULL;
7328 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007329}
7330
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007331static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7332{
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007333 int ret;
7334
Pavel Begunkov08480402021-04-13 02:58:38 +01007335 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007336 return -ENXIO;
Pavel Begunkov08480402021-04-13 02:58:38 +01007337 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7338 if (!ret)
7339 __io_sqe_files_unregister(ctx);
7340 return ret;
Jens Axboe6b063142019-01-10 22:13:58 -07007341}
7342
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007343static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007344 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007345{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007346 WARN_ON_ONCE(sqd->thread == current);
7347
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007348 /*
7349 * Do the dance but not conditional clear_bit() because it'd race with
7350 * other threads incrementing park_pending and setting the bit.
7351 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007352 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007353 if (atomic_dec_return(&sqd->park_pending))
7354 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007355 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007356}
7357
Jens Axboe86e0d672021-03-05 08:44:39 -07007358static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007359 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007360{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007361 WARN_ON_ONCE(sqd->thread == current);
7362
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007363 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007364 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007365 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007366 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007367 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007368}
7369
7370static void io_sq_thread_stop(struct io_sq_data *sqd)
7371{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007372 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007373 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007374
Jens Axboe05962f92021-03-06 13:58:48 -07007375 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007376 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07007377 if (sqd->thread)
7378 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007379 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007380 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007381}
7382
Jens Axboe534ca6d2020-09-02 13:52:19 -06007383static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007384{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007385 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007386 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7387
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007388 io_sq_thread_stop(sqd);
7389 kfree(sqd);
7390 }
7391}
7392
7393static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7394{
7395 struct io_sq_data *sqd = ctx->sq_data;
7396
7397 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007398 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007399 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007400 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007401 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007402
7403 io_put_sq_data(sqd);
7404 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007405 }
7406}
7407
Jens Axboeaa061652020-09-02 14:50:27 -06007408static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7409{
7410 struct io_ring_ctx *ctx_attach;
7411 struct io_sq_data *sqd;
7412 struct fd f;
7413
7414 f = fdget(p->wq_fd);
7415 if (!f.file)
7416 return ERR_PTR(-ENXIO);
7417 if (f.file->f_op != &io_uring_fops) {
7418 fdput(f);
7419 return ERR_PTR(-EINVAL);
7420 }
7421
7422 ctx_attach = f.file->private_data;
7423 sqd = ctx_attach->sq_data;
7424 if (!sqd) {
7425 fdput(f);
7426 return ERR_PTR(-EINVAL);
7427 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007428 if (sqd->task_tgid != current->tgid) {
7429 fdput(f);
7430 return ERR_PTR(-EPERM);
7431 }
Jens Axboeaa061652020-09-02 14:50:27 -06007432
7433 refcount_inc(&sqd->refs);
7434 fdput(f);
7435 return sqd;
7436}
7437
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007438static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7439 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007440{
7441 struct io_sq_data *sqd;
7442
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007443 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007444 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7445 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007446 if (!IS_ERR(sqd)) {
7447 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007448 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007449 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007450		/* fall through for the EPERM case and set up a new sqd/task */
7451 if (PTR_ERR(sqd) != -EPERM)
7452 return sqd;
7453 }
Jens Axboeaa061652020-09-02 14:50:27 -06007454
Jens Axboe534ca6d2020-09-02 13:52:19 -06007455 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7456 if (!sqd)
7457 return ERR_PTR(-ENOMEM);
7458
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007459 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007460 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007461 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007462 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007463 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007464 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007465 return sqd;
7466}
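
The attach path above is what IORING_SETUP_ATTACH_WQ exercises: a second ring can share
an existing ring's SQPOLL thread rather than spawning its own, provided both rings belong
to the same thread group. A liburing sketch (illustrative only; ring1 is assumed to be an
already initialised SQPOLL ring):

	#include <liburing.h>

	static int attach_to_sqpoll(struct io_uring *ring1, struct io_uring *ring2)
	{
		struct io_uring_params p = { 0 };

		p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
		p.wq_fd = ring1->ring_fd;	/* fd of the ring whose sq thread we share */

		/* if the attach is refused with -EPERM, io_get_sq_data() above
		 * falls back to creating a fresh sqd/task for this ring */
		return io_uring_queue_init_params(8, ring2, &p);
	}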
7467
Jens Axboe6b063142019-01-10 22:13:58 -07007468#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007469/*
7470 * Ensure the UNIX gc is aware of our file set, so we are certain that
7471 * the io_uring can be safely unregistered on process exit, even if we have
7472 * loops in the file referencing.
7473 */
7474static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7475{
7476 struct sock *sk = ctx->ring_sock->sk;
7477 struct scm_fp_list *fpl;
7478 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007479 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007480
Jens Axboe6b063142019-01-10 22:13:58 -07007481 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7482 if (!fpl)
7483 return -ENOMEM;
7484
7485 skb = alloc_skb(0, GFP_KERNEL);
7486 if (!skb) {
7487 kfree(fpl);
7488 return -ENOMEM;
7489 }
7490
7491 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007492
Jens Axboe08a45172019-10-03 08:11:03 -06007493 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007494 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007495 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007496 struct file *file = io_file_from_index(ctx, i + offset);
7497
7498 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007499 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007500 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007501 unix_inflight(fpl->user, fpl->fp[nr_files]);
7502 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007503 }
7504
Jens Axboe08a45172019-10-03 08:11:03 -06007505 if (nr_files) {
7506 fpl->max = SCM_MAX_FD;
7507 fpl->count = nr_files;
7508 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007509 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007510 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7511 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007512
Jens Axboe08a45172019-10-03 08:11:03 -06007513 for (i = 0; i < nr_files; i++)
7514 fput(fpl->fp[i]);
7515 } else {
7516 kfree_skb(skb);
7517 kfree(fpl);
7518 }
Jens Axboe6b063142019-01-10 22:13:58 -07007519
7520 return 0;
7521}
7522
7523/*
7524 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7525 * causes regular reference counting to break down. We rely on the UNIX
7526 * garbage collection to take care of this problem for us.
7527 */
7528static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7529{
7530 unsigned left, total;
7531 int ret = 0;
7532
7533 total = 0;
7534 left = ctx->nr_user_files;
7535 while (left) {
7536 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007537
7538 ret = __io_sqe_files_scm(ctx, this_files, total);
7539 if (ret)
7540 break;
7541 left -= this_files;
7542 total += this_files;
7543 }
7544
7545 if (!ret)
7546 return 0;
7547
7548 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007549 struct file *file = io_file_from_index(ctx, total);
7550
7551 if (file)
7552 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007553 total++;
7554 }
7555
7556 return ret;
7557}
7558#else
7559static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7560{
7561 return 0;
7562}
7563#endif
7564
Pavel Begunkov47e90392021-04-01 15:43:56 +01007565static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007566{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007567 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007568#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007569 struct sock *sock = ctx->ring_sock->sk;
7570 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7571 struct sk_buff *skb;
7572 int i;
7573
7574 __skb_queue_head_init(&list);
7575
7576 /*
7577 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7578 * remove this entry and rearrange the file array.
7579 */
7580 skb = skb_dequeue(head);
7581 while (skb) {
7582 struct scm_fp_list *fp;
7583
7584 fp = UNIXCB(skb).fp;
7585 for (i = 0; i < fp->count; i++) {
7586 int left;
7587
7588 if (fp->fp[i] != file)
7589 continue;
7590
7591 unix_notinflight(fp->user, fp->fp[i]);
7592 left = fp->count - 1 - i;
7593 if (left) {
7594 memmove(&fp->fp[i], &fp->fp[i + 1],
7595 left * sizeof(struct file *));
7596 }
7597 fp->count--;
7598 if (!fp->count) {
7599 kfree_skb(skb);
7600 skb = NULL;
7601 } else {
7602 __skb_queue_tail(&list, skb);
7603 }
7604 fput(file);
7605 file = NULL;
7606 break;
7607 }
7608
7609 if (!file)
7610 break;
7611
7612 __skb_queue_tail(&list, skb);
7613
7614 skb = skb_dequeue(head);
7615 }
7616
7617 if (skb_peek(&list)) {
7618 spin_lock_irq(&head->lock);
7619 while ((skb = __skb_dequeue(&list)) != NULL)
7620 __skb_queue_tail(head, skb);
7621 spin_unlock_irq(&head->lock);
7622 }
7623#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007624 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007625#endif
7626}
7627
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007628static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007629{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007630 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007631 struct io_ring_ctx *ctx = rsrc_data->ctx;
7632 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007633
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007634 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7635 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007636
7637 if (prsrc->tag) {
7638 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007639
7640 io_ring_submit_lock(ctx, lock_ring);
Pavel Begunkov157d2572021-06-14 02:36:19 +01007641 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007642 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
Pavel Begunkov2840f712021-04-27 16:13:51 +01007643 ctx->cq_extra++;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007644 io_commit_cqring(ctx);
Pavel Begunkov157d2572021-06-14 02:36:19 +01007645 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007646 io_cqring_ev_posted(ctx);
7647 io_ring_submit_unlock(ctx, lock_ring);
7648 }
7649
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007650 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007651 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007652 }
7653
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007654 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007655 if (atomic_dec_and_test(&rsrc_data->refs))
7656 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007657}
7658
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007659static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007660{
7661 struct io_ring_ctx *ctx;
7662 struct llist_node *node;
7663
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007664 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7665 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007666
7667 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007668 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007669 struct llist_node *next = node->next;
7670
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007671 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007672 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007673 node = next;
7674 }
7675}
7676
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007677static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007678{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007679 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007680 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
Jens Axboe4956b9e2021-08-09 07:49:41 -06007681 unsigned long flags;
Pavel Begunkove2978222020-11-18 14:56:26 +00007682 bool first_add = false;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007683
Jens Axboe4956b9e2021-08-09 07:49:41 -06007684 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007685 node->done = true;
Pavel Begunkove2978222020-11-18 14:56:26 +00007686
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007687 while (!list_empty(&ctx->rsrc_ref_list)) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007688 node = list_first_entry(&ctx->rsrc_ref_list,
7689 struct io_rsrc_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007690 /* recycle ref nodes in order */
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007691 if (!node->done)
Pavel Begunkove2978222020-11-18 14:56:26 +00007692 break;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007693 list_del(&node->node);
7694 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007695 }
Jens Axboe4956b9e2021-08-09 07:49:41 -06007696 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
Pavel Begunkove2978222020-11-18 14:56:26 +00007697
Pavel Begunkov3e942492021-04-11 01:46:34 +01007698 if (first_add)
7699 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007700}
7701
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007702static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007703{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007704 struct io_rsrc_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007705
7706 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7707 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007708 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007709
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007710 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007711 0, GFP_KERNEL)) {
7712 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007713 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007714 }
7715 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007716 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007717 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007718 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007719}
7720
Jens Axboe05f3fb32019-12-09 11:22:50 -07007721static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01007722 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007723{
7724 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007725 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007726 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007727 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007728
7729 if (ctx->file_data)
7730 return -EBUSY;
7731 if (!nr_args)
7732 return -EINVAL;
7733 if (nr_args > IORING_MAX_FIXED_FILES)
7734 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007735 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007736 if (ret)
7737 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007738 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
7739 &ctx->file_data);
7740 if (ret)
7741 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007742
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007743 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007744 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007745 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007746
Jens Axboe05f3fb32019-12-09 11:22:50 -07007747 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01007748 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007749 ret = -EFAULT;
7750 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007751 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007752 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01007753 if (fd == -1) {
7754 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007755 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01007756 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007757 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007758 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007759
Jens Axboe05f3fb32019-12-09 11:22:50 -07007760 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007761 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007762 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007763 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007764
7765 /*
7766 * Don't allow io_uring instances to be registered. If UNIX
7767 * isn't enabled, then this causes a reference cycle and this
7768 * instance can never get freed. If UNIX is enabled we'll
7769 * handle it just fine, but there's still no point in allowing
7770 * a ring fd as it doesn't support regular read/write anyway.
7771 */
7772 if (file->f_op == &io_uring_fops) {
7773 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007774 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007775 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007776 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007777 }
7778
Jens Axboe05f3fb32019-12-09 11:22:50 -07007779 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007780 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01007781 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007782 return ret;
7783 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007784
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007785 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007786 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007787out_fput:
7788 for (i = 0; i < ctx->nr_user_files; i++) {
7789 file = io_file_from_index(ctx, i);
7790 if (file)
7791 fput(file);
7792 }
Pavel Begunkov042b0d82021-08-09 13:04:01 +01007793 io_free_file_tables(&ctx->file_table);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007794 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007795out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007796 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007797 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007798 return ret;
7799}
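
This registration path is reached through io_uring_register(IORING_REGISTER_FILES); -1
entries create the sparse slots handled above, which can be filled in later via the
update path. A liburing sketch that registers two files plus one sparse slot and then
addresses slot 0 by index (assumed standard helpers; fd_a, fd_b, buf and len are
placeholders):

	#include <liburing.h>

	static int use_fixed_files(struct io_uring *ring, int fd_a, int fd_b,
				   void *buf, unsigned len)
	{
		int fds[3] = { fd_a, fd_b, -1 };	/* last slot left sparse */
		struct io_uring_sqe *sqe;
		int ret;

		ret = io_uring_register_files(ring, fds, 3);
		if (ret < 0)
			return ret;

		sqe = io_uring_get_sqe(ring);
		io_uring_prep_read(sqe, 0 /* fixed slot, not an fd */, buf, len, 0);
		sqe->flags |= IOSQE_FIXED_FILE;
		return io_uring_submit(ring);
	}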
7800
Jens Axboec3a31e62019-10-03 13:59:56 -06007801static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7802 int index)
7803{
7804#if defined(CONFIG_UNIX)
7805 struct sock *sock = ctx->ring_sock->sk;
7806 struct sk_buff_head *head = &sock->sk_receive_queue;
7807 struct sk_buff *skb;
7808
7809 /*
7810 * See if we can merge this file into an existing skb SCM_RIGHTS
7811 * file set. If there's no room, fall back to allocating a new skb
7812 * and filling it in.
7813 */
7814 spin_lock_irq(&head->lock);
7815 skb = skb_peek(head);
7816 if (skb) {
7817 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7818
7819 if (fpl->count < SCM_MAX_FD) {
7820 __skb_unlink(skb, head);
7821 spin_unlock_irq(&head->lock);
7822 fpl->fp[fpl->count] = get_file(file);
7823 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7824 fpl->count++;
7825 spin_lock_irq(&head->lock);
7826 __skb_queue_head(head, skb);
7827 } else {
7828 skb = NULL;
7829 }
7830 }
7831 spin_unlock_irq(&head->lock);
7832
7833 if (skb) {
7834 fput(file);
7835 return 0;
7836 }
7837
7838 return __io_sqe_files_scm(ctx, 1, index);
7839#else
7840 return 0;
7841#endif
7842}
7843
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007844static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
Pavel Begunkove7c78372021-04-01 15:43:45 +01007845 struct io_rsrc_node *node, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007846{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007847 struct io_rsrc_put *prsrc;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007848
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007849 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7850 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007851 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007852
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007853 prsrc->tag = *io_get_tag_slot(data, idx);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007854 prsrc->rsrc = rsrc;
Pavel Begunkove7c78372021-04-01 15:43:45 +01007855 list_add(&prsrc->list, &node->rsrc_list);
Hillf Dantona5318d32020-03-23 17:47:15 +08007856 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007857}
7858
7859static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007860 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007861 unsigned nr_args)
7862{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007863 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007864 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007865 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007866 struct io_fixed_file *file_slot;
7867 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007868 int fd, i, err = 0;
7869 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007870 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007871
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007872 if (!ctx->file_data)
7873 return -ENXIO;
7874 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06007875 return -EINVAL;
7876
Pavel Begunkov67973b92021-01-26 13:51:09 +00007877 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007878 u64 tag = 0;
7879
7880 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
7881 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007882 err = -EFAULT;
7883 break;
7884 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007885 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
7886 err = -EINVAL;
7887 break;
7888 }
noah4e0377a2021-01-26 15:23:28 -05007889 if (fd == IORING_REGISTER_FILES_SKIP)
7890 continue;
7891
Pavel Begunkov67973b92021-01-26 13:51:09 +00007892 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007893 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007894
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007895 if (file_slot->file_ptr) {
7896 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007897 err = io_queue_rsrc_removal(data, up->offset + done,
7898 ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08007899 if (err)
7900 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007901 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007902 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007903 }
7904 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007905 file = fget(fd);
7906 if (!file) {
7907 err = -EBADF;
7908 break;
7909 }
7910 /*
7911 * Don't allow io_uring instances to be registered. If
7912 * UNIX isn't enabled, then this causes a reference
7913 * cycle and this instance can never get freed. If UNIX
7914 * is enabled we'll handle it just fine, but there's
7915 * still no point in allowing a ring fd as it doesn't
7916 * support regular read/write anyway.
7917 */
7918 if (file->f_op == &io_uring_fops) {
7919 fput(file);
7920 err = -EBADF;
7921 break;
7922 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007923 *io_get_tag_slot(data, up->offset + done) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01007924 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007925 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007926 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007927 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007928 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007929 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007930 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007931 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007932 }
7933
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007934 if (needs_switch)
7935 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06007936 return done ? done : err;
7937}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007938
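/*
 * Create the io-wq instance used to offload work for this ring: the hash
 * map that serialises hashed work is shared per ring and lazily allocated
 * under uring_lock, and worker concurrency is capped at the smaller of the
 * SQ size and 4 * online CPUs.
 */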
Jens Axboe685fe7f2021-03-08 09:37:51 -07007939static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7940 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007941{
Jens Axboee9418942021-02-19 12:33:30 -07007942 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007943 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007944 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007945
Yang Yingliang362a9e62021-07-20 16:38:05 +08007946 mutex_lock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07007947 hash = ctx->hash_map;
7948 if (!hash) {
7949 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
Yang Yingliang362a9e62021-07-20 16:38:05 +08007950 if (!hash) {
7951 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07007952 return ERR_PTR(-ENOMEM);
Yang Yingliang362a9e62021-07-20 16:38:05 +08007953 }
Jens Axboee9418942021-02-19 12:33:30 -07007954 refcount_set(&hash->refs, 1);
7955 init_waitqueue_head(&hash->wait);
7956 ctx->hash_map = hash;
7957 }
Yang Yingliang362a9e62021-07-20 16:38:05 +08007958 mutex_unlock(&ctx->uring_lock);
Jens Axboee9418942021-02-19 12:33:30 -07007959
7960 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07007961 data.task = task;
Pavel Begunkovebc11b62021-08-09 13:04:05 +01007962 data.free_work = io_wq_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007963 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007964
Jens Axboed25e3a32021-02-16 11:41:41 -07007965	/* Do QD, or 4 * CPUS, whichever is smaller */
7966 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007967
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007968 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007969}
7970
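/*
 * Allocate the per-task io_uring state: the inflight counter, the xarray of
 * ring nodes, task_work machinery and the task's private io-wq. On success
 * it is attached to task->io_uring.
 */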
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007971static int io_uring_alloc_task_context(struct task_struct *task,
7972 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007973{
7974 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007975 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007976
Pavel Begunkov09899b12021-06-14 02:36:22 +01007977 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06007978 if (unlikely(!tctx))
7979 return -ENOMEM;
7980
Jens Axboed8a6df12020-10-15 16:24:45 -06007981 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7982 if (unlikely(ret)) {
7983 kfree(tctx);
7984 return ret;
7985 }
7986
Jens Axboe685fe7f2021-03-08 09:37:51 -07007987 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007988 if (IS_ERR(tctx->io_wq)) {
7989 ret = PTR_ERR(tctx->io_wq);
7990 percpu_counter_destroy(&tctx->inflight);
7991 kfree(tctx);
7992 return ret;
7993 }
7994
Jens Axboe0f212202020-09-13 13:09:39 -06007995 xa_init(&tctx->xa);
7996 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06007997 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01007998 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007999 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00008000 spin_lock_init(&tctx->task_lock);
8001 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00008002 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06008003 return 0;
8004}
8005
8006void __io_uring_free(struct task_struct *tsk)
8007{
8008 struct io_uring_task *tctx = tsk->io_uring;
8009
8010 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008011 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01008012 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00008013
Jens Axboed8a6df12020-10-15 16:24:45 -06008014 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06008015 kfree(tctx);
8016 tsk->io_uring = NULL;
8017}
8018
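/*
 * Set up SQPOLL offload if requested: look up or create the io_sq_data to
 * attach to, configure the idle timeout and optional CPU affinity, and
 * spawn the SQ poll thread via create_io_thread(). Without
 * IORING_SETUP_SQPOLL this only validates the wq-attach and SQ_AFF flags.
 */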
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02008019static int io_sq_offload_create(struct io_ring_ctx *ctx,
8020 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008021{
8022 int ret;
8023
Jens Axboed25e3a32021-02-16 11:41:41 -07008024 /* Retain compatibility with failing for an invalid attach attempt */
8025 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
8026 IORING_SETUP_ATTACH_WQ) {
8027 struct fd f;
8028
8029 f = fdget(p->wq_fd);
8030 if (!f.file)
8031 return -ENXIO;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008032 if (f.file->f_op != &io_uring_fops) {
8033 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008034 return -EINVAL;
Jens Axboe0cc936f2021-07-22 17:08:07 -06008035 }
8036 fdput(f);
Jens Axboed25e3a32021-02-16 11:41:41 -07008037 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07008038 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07008039 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008040 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008041 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06008042
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008043 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06008044 if (IS_ERR(sqd)) {
8045 ret = PTR_ERR(sqd);
8046 goto err;
8047 }
Jens Axboe69fb2132020-09-14 11:16:23 -06008048
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01008049 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06008050 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06008051 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8052 if (!ctx->sq_thread_idle)
8053 ctx->sq_thread_idle = HZ;
8054
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008055 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008056 list_add(&ctx->sqd_list, &sqd->ctx_list);
8057 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008058 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008059 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008060 io_sq_thread_unpark(sqd);
8061
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008062 if (ret < 0)
8063 goto err;
8064 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008065 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008066
Jens Axboe6c271ce2019-01-10 11:22:30 -07008067 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06008068 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008069
Jens Axboe917257d2019-04-13 09:28:55 -06008070 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008071 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008072 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008073 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008074 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008075 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008076 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008077
8078 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008079 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008080 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8081 if (IS_ERR(tsk)) {
8082 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008083 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008084 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008085
Jens Axboe46fe18b2021-03-04 12:39:36 -07008086 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008087 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008088 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008089 if (ret)
8090 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008091 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8092 /* Can't have SQ_AFF without SQPOLL */
8093 ret = -EINVAL;
8094 goto err;
8095 }
8096
Jens Axboe2b188cc2019-01-07 10:46:33 -07008097 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008098err_sqpoll:
8099 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008100err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008101 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008102 return ret;
8103}
8104
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008105static inline void __io_unaccount_mem(struct user_struct *user,
8106 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008107{
8108 atomic_long_sub(nr_pages, &user->locked_vm);
8109}
8110
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008111static inline int __io_account_mem(struct user_struct *user,
8112 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008113{
8114 unsigned long page_limit, cur_pages, new_pages;
8115
8116 /* Don't allow more pages than we can safely lock */
8117 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8118
8119 do {
8120 cur_pages = atomic_long_read(&user->locked_vm);
8121 new_pages = cur_pages + nr_pages;
8122 if (new_pages > page_limit)
8123 return -ENOMEM;
8124 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8125 new_pages) != cur_pages);
8126
8127 return 0;
8128}
8129
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008130static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008131{
Jens Axboe62e398b2021-02-21 16:19:37 -07008132 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008133 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008134
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008135 if (ctx->mm_account)
8136 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008137}
8138
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008139static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008140{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008141 int ret;
8142
Jens Axboe62e398b2021-02-21 16:19:37 -07008143 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008144 ret = __io_account_mem(ctx->user, nr_pages);
8145 if (ret)
8146 return ret;
8147 }
8148
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008149 if (ctx->mm_account)
8150 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008151
8152 return 0;
8153}
8154
Jens Axboe2b188cc2019-01-07 10:46:33 -07008155static void io_mem_free(void *ptr)
8156{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008157 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008158
Mark Rutland52e04ef2019-04-30 17:30:21 +01008159 if (!ptr)
8160 return;
8161
8162 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008163 if (put_page_testzero(page))
8164 free_compound_page(page);
8165}
8166
8167static void *io_mem_alloc(size_t size)
8168{
8169 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008170 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008171
8172 return (void *) __get_free_pages(gfp_flags, get_order(size));
8173}
8174
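/*
 * Compute the size of the shared ring allocation: the io_rings struct with
 * its CQE array, aligned to a cache line on SMP, followed by the SQ index
 * array. *sq_offset receives the offset of the SQ array; SIZE_MAX indicates
 * overflow.
 */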
Hristo Venev75b28af2019-08-26 17:23:46 +00008175static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8176 size_t *sq_offset)
8177{
8178 struct io_rings *rings;
8179 size_t off, sq_array_size;
8180
8181 off = struct_size(rings, cqes, cq_entries);
8182 if (off == SIZE_MAX)
8183 return SIZE_MAX;
8184
8185#ifdef CONFIG_SMP
8186 off = ALIGN(off, SMP_CACHE_BYTES);
8187 if (off == 0)
8188 return SIZE_MAX;
8189#endif
8190
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008191 if (sq_offset)
8192 *sq_offset = off;
8193
Hristo Venev75b28af2019-08-26 17:23:46 +00008194 sq_array_size = array_size(sizeof(u32), sq_entries);
8195 if (sq_array_size == SIZE_MAX)
8196 return SIZE_MAX;
8197
8198 if (check_add_overflow(off, sq_array_size, &off))
8199 return SIZE_MAX;
8200
Hristo Venev75b28af2019-08-26 17:23:46 +00008201 return off;
8202}
8203
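/*
 * Tear down one registered buffer slot: unpin its pages, drop the accounted
 * memory and free the io_mapped_ubuf, unless it's the shared dummy buffer
 * used for sparse registrations.
 */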
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008204static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008205{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008206 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008207 unsigned int i;
8208
Pavel Begunkov62248432021-04-28 13:11:29 +01008209 if (imu != ctx->dummy_ubuf) {
8210 for (i = 0; i < imu->nr_bvecs; i++)
8211 unpin_user_page(imu->bvec[i].bv_page);
8212 if (imu->acct_pages)
8213 io_unaccount_mem(ctx, imu->acct_pages);
8214 kvfree(imu);
8215 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008216 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008217}
8218
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008219static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8220{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008221 io_buffer_unmap(ctx, &prsrc->buf);
8222 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008223}
8224
8225static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008226{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008227 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008228
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008229 for (i = 0; i < ctx->nr_user_bufs; i++)
8230 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008231 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08008232 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07008233 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008234 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008235 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008236}
8237
Jens Axboeedafcce2019-01-09 09:16:05 -07008238static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8239{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008240 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008241
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008242 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07008243 return -ENXIO;
8244
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008245 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8246 if (!ret)
8247 __io_sqe_buffers_unregister(ctx);
8248 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008249}
8250
8251static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8252 void __user *arg, unsigned index)
8253{
8254 struct iovec __user *src;
8255
8256#ifdef CONFIG_COMPAT
8257 if (ctx->compat) {
8258 struct compat_iovec __user *ciovs;
8259 struct compat_iovec ciov;
8260
8261 ciovs = (struct compat_iovec __user *) arg;
8262 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8263 return -EFAULT;
8264
Jens Axboed55e5f52019-12-11 16:12:15 -07008265 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008266 dst->iov_len = ciov.iov_len;
8267 return 0;
8268 }
8269#endif
8270 src = (struct iovec __user *) arg;
8271 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8272 return -EFAULT;
8273 return 0;
8274}
8275
Jens Axboede293932020-09-17 16:19:16 -06008276/*
8277 * Not super efficient, but this is just a registration time. And we do cache
8278 * the last compound head, so generally we'll only do a full search if we don't
8279 * match that one.
8280 *
8281 * We check if the given compound head page has already been accounted, to
8282 * avoid double accounting it. This allows us to account the full size of the
8283 * page, not just the constituent pages of a huge page.
8284 */
8285static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8286 int nr_pages, struct page *hpage)
8287{
8288 int i, j;
8289
8290 /* check current page array */
8291 for (i = 0; i < nr_pages; i++) {
8292 if (!PageCompound(pages[i]))
8293 continue;
8294 if (compound_head(pages[i]) == hpage)
8295 return true;
8296 }
8297
8298 /* check previously registered pages */
8299 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008300 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06008301
8302 for (j = 0; j < imu->nr_bvecs; j++) {
8303 if (!PageCompound(imu->bvec[j].bv_page))
8304 continue;
8305 if (compound_head(imu->bvec[j].bv_page) == hpage)
8306 return true;
8307 }
8308 }
8309
8310 return false;
8311}
8312
8313static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8314 int nr_pages, struct io_mapped_ubuf *imu,
8315 struct page **last_hpage)
8316{
8317 int i, ret;
8318
Pavel Begunkov216e5832021-05-29 12:01:02 +01008319 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06008320 for (i = 0; i < nr_pages; i++) {
8321 if (!PageCompound(pages[i])) {
8322 imu->acct_pages++;
8323 } else {
8324 struct page *hpage;
8325
8326 hpage = compound_head(pages[i]);
8327 if (hpage == *last_hpage)
8328 continue;
8329 *last_hpage = hpage;
8330 if (headpage_already_acct(ctx, pages, i, hpage))
8331 continue;
8332 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8333 }
8334 }
8335
8336 if (!imu->acct_pages)
8337 return 0;
8338
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008339 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008340 if (ret)
8341 imu->acct_pages = 0;
8342 return ret;
8343}
8344
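/*
 * Register a single user buffer: pin the pages backing the iovec with
 * FOLL_LONGTERM, reject file-backed mappings (other than shmem and
 * hugetlb), account the pinned pages and build the bvec table describing
 * the buffer. A NULL iov_base installs the shared dummy buffer instead.
 */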
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008345static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008346 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008347 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008348{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008349 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008350 struct vm_area_struct **vmas = NULL;
8351 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008352 unsigned long off, start, end, ubuf;
8353 size_t size;
8354 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008355
Pavel Begunkov62248432021-04-28 13:11:29 +01008356 if (!iov->iov_base) {
8357 *pimu = ctx->dummy_ubuf;
8358 return 0;
8359 }
8360
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008361 ubuf = (unsigned long) iov->iov_base;
8362 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8363 start = ubuf >> PAGE_SHIFT;
8364 nr_pages = end - start;
8365
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008366 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008367 ret = -ENOMEM;
8368
8369 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8370 if (!pages)
8371 goto done;
8372
8373 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8374 GFP_KERNEL);
8375 if (!vmas)
8376 goto done;
8377
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008378 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01008379 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008380 goto done;
8381
8382 ret = 0;
8383 mmap_read_lock(current->mm);
8384 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8385 pages, vmas);
8386 if (pret == nr_pages) {
8387		/* don't support file-backed memory */
8388 for (i = 0; i < nr_pages; i++) {
8389 struct vm_area_struct *vma = vmas[i];
8390
Pavel Begunkov40dad762021-06-09 15:26:54 +01008391 if (vma_is_shmem(vma))
8392 continue;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008393 if (vma->vm_file &&
8394 !is_file_hugepages(vma->vm_file)) {
8395 ret = -EOPNOTSUPP;
8396 break;
8397 }
8398 }
8399 } else {
8400 ret = pret < 0 ? pret : -EFAULT;
8401 }
8402 mmap_read_unlock(current->mm);
8403 if (ret) {
8404 /*
8405		 * if we did a partial map, or found file-backed vmas,
8406 * release any pages we did get
8407 */
8408 if (pret > 0)
8409 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008410 goto done;
8411 }
8412
8413 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8414 if (ret) {
8415 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008416 goto done;
8417 }
8418
8419 off = ubuf & ~PAGE_MASK;
8420 size = iov->iov_len;
8421 for (i = 0; i < nr_pages; i++) {
8422 size_t vec_len;
8423
8424 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8425 imu->bvec[i].bv_page = pages[i];
8426 imu->bvec[i].bv_len = vec_len;
8427 imu->bvec[i].bv_offset = off;
8428 off = 0;
8429 size -= vec_len;
8430 }
8431 /* store original address for later verification */
8432 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01008433 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008434 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008435 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008436 ret = 0;
8437done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008438 if (ret)
8439 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008440 kvfree(pages);
8441 kvfree(vmas);
8442 return ret;
8443}
8444
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008445static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008446{
Pavel Begunkov87094462021-04-11 01:46:36 +01008447 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8448 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008449}
8450
8451static int io_buffer_validate(struct iovec *iov)
8452{
Pavel Begunkov50e96982021-03-24 22:59:01 +00008453 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8454
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008455 /*
8456 * Don't impose further limits on the size and buffer
8457	 * constraints here; we'll return -EINVAL later when IO is
8458 * submitted if they are wrong.
8459 */
Pavel Begunkov62248432021-04-28 13:11:29 +01008460 if (!iov->iov_base)
8461 return iov->iov_len ? -EFAULT : 0;
8462 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008463 return -EFAULT;
8464
8465 /* arbitrary limit, but we need something */
8466 if (iov->iov_len > SZ_1G)
8467 return -EFAULT;
8468
Pavel Begunkov50e96982021-03-24 22:59:01 +00008469 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8470 return -EOVERFLOW;
8471
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008472 return 0;
8473}
8474
8475static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008476 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008477{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008478 struct page *last_hpage = NULL;
8479 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008480 int i, ret;
8481 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008482
Pavel Begunkov87094462021-04-11 01:46:36 +01008483 if (ctx->user_bufs)
8484 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01008485 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01008486 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008487 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008488 if (ret)
8489 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008490 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
8491 if (ret)
8492 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008493 ret = io_buffers_map_alloc(ctx, nr_args);
8494 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08008495 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008496 return ret;
8497 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008498
Pavel Begunkov87094462021-04-11 01:46:36 +01008499 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07008500 ret = io_copy_iov(ctx, &iov, arg, i);
8501 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008502 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008503 ret = io_buffer_validate(&iov);
8504 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008505 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008506 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008507 ret = -EINVAL;
8508 break;
8509 }
Jens Axboeedafcce2019-01-09 09:16:05 -07008510
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008511 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
8512 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008513 if (ret)
8514 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008515 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008516
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008517 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008518
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008519 ctx->buf_data = data;
8520 if (ret)
8521 __io_sqe_buffers_unregister(ctx);
8522 else
8523 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07008524 return ret;
8525}
8526
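/*
 * Apply an update to the registered buffer table: each new iovec is
 * validated and pinned, any buffer already occupying the slot is queued for
 * removal via the rsrc node, and the new tag is stored. Returns the number
 * of entries updated, or an error if none were.
 */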
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008527static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
8528 struct io_uring_rsrc_update2 *up,
8529 unsigned int nr_args)
8530{
8531 u64 __user *tags = u64_to_user_ptr(up->tags);
8532 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008533 struct page *last_hpage = NULL;
8534 bool needs_switch = false;
8535 __u32 done;
8536 int i, err;
8537
8538 if (!ctx->buf_data)
8539 return -ENXIO;
8540 if (up->offset + nr_args > ctx->nr_user_bufs)
8541 return -EINVAL;
8542
8543 for (done = 0; done < nr_args; done++) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008544 struct io_mapped_ubuf *imu;
8545 int offset = up->offset + done;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008546 u64 tag = 0;
8547
8548 err = io_copy_iov(ctx, &iov, iovs, done);
8549 if (err)
8550 break;
8551 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
8552 err = -EFAULT;
8553 break;
8554 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008555 err = io_buffer_validate(&iov);
8556 if (err)
8557 break;
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008558 if (!iov.iov_base && tag) {
8559 err = -EINVAL;
8560 break;
8561 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008562 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
8563 if (err)
8564 break;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008565
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008566 i = array_index_nospec(offset, ctx->nr_user_bufs);
Pavel Begunkov62248432021-04-28 13:11:29 +01008567 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008568 err = io_queue_rsrc_removal(ctx->buf_data, offset,
8569 ctx->rsrc_node, ctx->user_bufs[i]);
8570 if (unlikely(err)) {
8571 io_buffer_unmap(ctx, &imu);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008572 break;
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008573 }
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008574 ctx->user_bufs[i] = NULL;
8575 needs_switch = true;
8576 }
8577
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008578 ctx->user_bufs[i] = imu;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008579 *io_get_tag_slot(ctx->buf_data, offset) = tag;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008580 }
8581
8582 if (needs_switch)
8583 io_rsrc_node_switch(ctx, ctx->buf_data);
8584 return done ? done : err;
8585}
8586
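/*
 * Register an eventfd for completion notifications: copy the fd from
 * userspace and take a reference on its eventfd context. Only one eventfd
 * may be registered per ring at a time.
 */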
Jens Axboe9b402842019-04-11 11:45:41 -06008587static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8588{
8589 __s32 __user *fds = arg;
8590 int fd;
8591
8592 if (ctx->cq_ev_fd)
8593 return -EBUSY;
8594
8595 if (copy_from_user(&fd, fds, sizeof(*fds)))
8596 return -EFAULT;
8597
8598 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8599 if (IS_ERR(ctx->cq_ev_fd)) {
8600 int ret = PTR_ERR(ctx->cq_ev_fd);
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01008601
Jens Axboe9b402842019-04-11 11:45:41 -06008602 ctx->cq_ev_fd = NULL;
8603 return ret;
8604 }
8605
8606 return 0;
8607}
8608
8609static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8610{
8611 if (ctx->cq_ev_fd) {
8612 eventfd_ctx_put(ctx->cq_ev_fd);
8613 ctx->cq_ev_fd = NULL;
8614 return 0;
8615 }
8616
8617 return -ENXIO;
8618}
8619
Jens Axboe5a2e7452020-02-23 16:23:11 -07008620static void io_destroy_buffers(struct io_ring_ctx *ctx)
8621{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008622 struct io_buffer *buf;
8623 unsigned long index;
8624
8625 xa_for_each(&ctx->io_buffers, index, buf)
8626 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008627}
8628
Jens Axboe68e68ee2021-02-13 09:00:02 -07008629static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008630{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008631 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008632
Jens Axboe68e68ee2021-02-13 09:00:02 -07008633 list_for_each_entry_safe(req, nxt, list, compl.list) {
8634 if (tsk && req->task != tsk)
8635 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008636 list_del(&req->compl.list);
8637 kmem_cache_free(req_cachep, req);
8638 }
8639}
8640
Jens Axboe4010fec2021-02-27 15:04:18 -07008641static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008642{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008643 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008644 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008645
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008646 mutex_lock(&ctx->uring_lock);
8647
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008648 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008649 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8650 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008651 submit_state->free_reqs = 0;
8652 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008653
Pavel Begunkovdac7a092021-03-19 17:22:39 +00008654 io_flush_cached_locked_reqs(ctx, cs);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008655 io_req_cache_free(&cs->free_list, NULL);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008656 mutex_unlock(&ctx->uring_lock);
8657}
8658
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008659static void io_wait_rsrc_data(struct io_rsrc_data *data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008660{
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008661 if (data && !atomic_dec_and_test(&data->refs))
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008662 wait_for_completion(&data->done);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008663}
8664
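/*
 * Final teardown of a ring: stop the SQPOLL thread, wait for outstanding
 * rsrc data to be released, unregister buffers, files and the eventfd, free
 * cached requests and the shared ring memory, then free the context itself.
 */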
Jens Axboe2b188cc2019-01-07 10:46:33 -07008665static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8666{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008667 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008668
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008669 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008670 mmdrop(ctx->mm_account);
8671 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008672 }
Jens Axboedef596e2019-01-09 08:59:42 -07008673
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008674 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
8675 io_wait_rsrc_data(ctx->buf_data);
8676 io_wait_rsrc_data(ctx->file_data);
8677
Hao Xu8bad28d2021-02-19 17:19:36 +08008678 mutex_lock(&ctx->uring_lock);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008679 if (ctx->buf_data)
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008680 __io_sqe_buffers_unregister(ctx);
Pavel Begunkov43597aa2021-08-10 02:44:23 +01008681 if (ctx->file_data)
Pavel Begunkov08480402021-04-13 02:58:38 +01008682 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01008683 if (ctx->rings)
8684 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08008685 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008686 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008687 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01008688 if (ctx->sq_creds)
8689 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07008690
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008691 /* there are no registered resources left, nobody uses it */
8692 if (ctx->rsrc_node)
8693 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008694 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008695 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008696 flush_delayed_work(&ctx->rsrc_put_work);
8697
8698 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8699 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008700
8701#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008702 if (ctx->ring_sock) {
8703 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008704 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008705 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008706#endif
8707
Hristo Venev75b28af2019-08-26 17:23:46 +00008708 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008709 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008710
8711 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008712 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008713 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008714 if (ctx->hash_map)
8715 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008716 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01008717 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008718 kfree(ctx);
8719}
8720
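/*
 * poll() on the ring fd: report EPOLLOUT while there is SQ space available
 * and EPOLLIN when completions (or overflowed CQEs) are pending.
 */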
8721static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8722{
8723 struct io_ring_ctx *ctx = file->private_data;
8724 __poll_t mask = 0;
8725
Pavel Begunkov311997b2021-06-14 23:37:28 +01008726 poll_wait(file, &ctx->poll_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008727 /*
8728 * synchronizes with barrier from wq_has_sleeper call in
8729 * io_commit_cqring
8730 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008731 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008732 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008733 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008734
8735 /*
8736 * Don't flush cqring overflow list here, just do a simple check.
8737	 * Otherwise there could possibly be an ABBA deadlock:
8738	 *      CPU0                    CPU1
8739	 *      ----                    ----
8740	 * lock(&ctx->uring_lock);
8741	 *                              lock(&ep->mtx);
8742	 *                              lock(&ctx->uring_lock);
8743	 * lock(&ep->mtx);
8744	 *
8745	 * Users may get EPOLLIN meanwhile seeing nothing in the cqring,
8746	 * which pushes them to do the flush.
8747 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01008748 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008749 mask |= EPOLLIN | EPOLLRDNORM;
8750
8751 return mask;
8752}
8753
8754static int io_uring_fasync(int fd, struct file *file, int on)
8755{
8756 struct io_ring_ctx *ctx = file->private_data;
8757
8758 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8759}
8760
Yejune Deng0bead8c2020-12-24 11:02:20 +08008761static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008762{
Jens Axboe4379bf82021-02-15 13:40:22 -07008763 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008764
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008765 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008766 if (creds) {
8767 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008768 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008769 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008770
8771 return -EINVAL;
8772}
8773
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008774struct io_tctx_exit {
8775 struct callback_head task_work;
8776 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008777 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008778};
8779
8780static void io_tctx_exit_cb(struct callback_head *cb)
8781{
8782 struct io_uring_task *tctx = current->io_uring;
8783 struct io_tctx_exit *work;
8784
8785 work = container_of(cb, struct io_tctx_exit, task_work);
8786 /*
8787 * When @in_idle, we're in cancellation and it's racy to remove the
8788	 * node. It'll be removed by the end of cancellation; just ignore it.
8789 */
8790 if (!atomic_read(&tctx->in_idle))
Pavel Begunkoveef51da2021-06-14 02:36:15 +01008791 io_uring_del_tctx_node((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008792 complete(&work->completion);
8793}
8794
Pavel Begunkov28090c12021-04-25 23:34:45 +01008795static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8796{
8797 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8798
8799 return req->ctx == data;
8800}
8801
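/*
 * Deferred ring teardown, run from a workqueue once the ring is being torn
 * down: repeatedly cancel remaining requests (including work punted to an
 * SQPOLL thread's io-wq) until all references are dropped, then detach each
 * task's tctx node via task_work and finally free the context.
 */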
Jens Axboe85faa7b2020-04-09 18:14:00 -06008802static void io_ring_exit_work(struct work_struct *work)
8803{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008804 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008805 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008806 struct io_tctx_exit exit;
8807 struct io_tctx_node *node;
8808 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008809
Jens Axboe56952e92020-06-17 15:00:04 -06008810 /*
8811 * If we're doing polled IO and end up having requests being
8812 * submitted async (out-of-line), then completions can come in while
8813 * we're waiting for refs to drop. We need to reap these manually,
8814 * as nobody else will be looking for them.
8815 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008816 do {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008817 io_uring_try_cancel_requests(ctx, NULL, true);
Pavel Begunkov28090c12021-04-25 23:34:45 +01008818 if (ctx->sq_data) {
8819 struct io_sq_data *sqd = ctx->sq_data;
8820 struct task_struct *tsk;
8821
8822 io_sq_thread_park(sqd);
8823 tsk = sqd->thread;
8824 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
8825 io_wq_cancel_cb(tsk->io_uring->io_wq,
8826 io_cancel_ctx_cb, ctx, true);
8827 io_sq_thread_unpark(sqd);
8828 }
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008829
8830 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008831 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008832
Pavel Begunkov7f006512021-04-14 13:38:34 +01008833 init_completion(&exit.completion);
8834 init_task_work(&exit.task_work, io_tctx_exit_cb);
8835 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01008836 /*
8837	 * Some may use the context even when all refs and requests have been put,
8838 * and they are free to do so while still holding uring_lock or
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01008839 * completion_lock, see io_req_task_submit(). Apart from other work,
Pavel Begunkov89b50662021-04-01 15:43:50 +01008840	 * this lock/unlock section also waits for them to finish.
8841 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008842 mutex_lock(&ctx->uring_lock);
8843 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008844 WARN_ON_ONCE(time_after(jiffies, timeout));
8845
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008846 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8847 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01008848 /* don't spin on a single task if cancellation failed */
8849 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008850 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8851 if (WARN_ON_ONCE(ret))
8852 continue;
8853 wake_up_process(node->task);
8854
8855 mutex_unlock(&ctx->uring_lock);
8856 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008857 mutex_lock(&ctx->uring_lock);
8858 }
8859 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov89b50662021-04-01 15:43:50 +01008860 spin_lock_irq(&ctx->completion_lock);
8861 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008862
Jens Axboe85faa7b2020-04-09 18:14:00 -06008863 io_ring_ctx_free(ctx);
8864}
8865
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008866/* Returns true if we found and killed one or more timeouts */
8867static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008868 bool cancel_all)
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008869{
8870 struct io_kiocb *req, *tmp;
8871 int canceled = 0;
8872
8873 spin_lock_irq(&ctx->completion_lock);
8874 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008875 if (io_match_task(req, tsk, cancel_all)) {
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008876 io_kill_timeout(req, -ECANCELED);
8877 canceled++;
8878 }
8879 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008880 if (canceled != 0)
8881 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008882 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008883 if (canceled != 0)
8884 io_cqring_ev_posted(ctx);
8885 return canceled != 0;
8886}
8887
Jens Axboe2b188cc2019-01-07 10:46:33 -07008888static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8889{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008890 unsigned long index;
8891 struct creds *creds;
8892
Jens Axboe2b188cc2019-01-07 10:46:33 -07008893 mutex_lock(&ctx->uring_lock);
8894 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00008895 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00008896 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008897 xa_for_each(&ctx->personalities, index, creds)
8898 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008899 mutex_unlock(&ctx->uring_lock);
8900
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008901 io_kill_timeouts(ctx, NULL, true);
8902 io_poll_remove_all(ctx, NULL, true);
Jens Axboe561fb042019-10-24 07:25:42 -06008903
Jens Axboe15dff282019-11-13 09:09:23 -07008904 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008905 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008906
Jens Axboe85faa7b2020-04-09 18:14:00 -06008907 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008908 /*
8909 * Use system_unbound_wq to avoid spawning tons of event kworkers
8910 * if we're exiting a ton of rings at the same time. It just adds
8911	 * noise and overhead; there's no discernible change in runtime
8912 * over using system_wq.
8913 */
8914 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008915}
8916
8917static int io_uring_release(struct inode *inode, struct file *file)
8918{
8919 struct io_ring_ctx *ctx = file->private_data;
8920
8921 file->private_data = NULL;
8922 io_ring_ctx_wait_and_kill(ctx);
8923 return 0;
8924}
8925
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008926struct io_task_cancel {
8927 struct task_struct *task;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008928 bool all;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008929};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008930
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008931static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008932{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008933 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008934 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008935 bool ret;
8936
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008937 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008938 unsigned long flags;
8939 struct io_ring_ctx *ctx = req->ctx;
8940
8941 /* protect against races with linked timeouts */
8942 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008943 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008944 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8945 } else {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008946 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008947 }
8948 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008949}
8950
Pavel Begunkove1915f72021-03-11 23:29:35 +00008951static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008952 struct task_struct *task, bool cancel_all)
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008953{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008954 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008955 LIST_HEAD(list);
8956
8957 spin_lock_irq(&ctx->completion_lock);
8958 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008959 if (io_match_task(de->req, task, cancel_all)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008960 list_cut_position(&list, &ctx->defer_list, &de->list);
8961 break;
8962 }
8963 }
8964 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008965 if (list_empty(&list))
8966 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008967
8968 while (!list_empty(&list)) {
8969 de = list_first_entry(&list, struct io_defer_entry, list);
8970 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008971 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008972 kfree(de);
8973 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008974 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008975}
8976
Pavel Begunkov1b007642021-03-06 11:02:17 +00008977static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8978{
8979 struct io_tctx_node *node;
8980 enum io_wq_cancel cret;
8981 bool ret = false;
8982
8983 mutex_lock(&ctx->uring_lock);
8984 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8985 struct io_uring_task *tctx = node->task->io_uring;
8986
8987 /*
8988 * io_wq will stay alive while we hold uring_lock, because it's
8989		 * killed after ctx nodes, which requires taking the lock.
8990 */
8991 if (!tctx || !tctx->io_wq)
8992 continue;
8993 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8994 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8995 }
8996 mutex_unlock(&ctx->uring_lock);
8997
8998 return ret;
8999}
9000
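/*
 * Cancel requests on this ring: cancel matching io-wq work (for every task
 * if @task is NULL), reap iopoll events where needed, and kill deferred
 * requests, poll requests and timeouts that match. Loops until a pass makes
 * no further progress.
 */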
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009001static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
9002 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009003 bool cancel_all)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009004{
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009005 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00009006 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009007
9008 while (1) {
9009 enum io_wq_cancel cret;
9010 bool ret = false;
9011
Pavel Begunkov1b007642021-03-06 11:02:17 +00009012 if (!task) {
9013 ret |= io_uring_try_cancel_iowq(ctx);
9014 } else if (tctx && tctx->io_wq) {
9015 /*
9016 * Cancels requests of all rings, not only @ctx, but
9017 * it's fine as the task is in exit/exec.
9018 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009019 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009020 &cancel, true);
9021 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9022 }
9023
9024 /* SQPOLL thread does its own polling */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009025 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
Jens Axboed052d1d2021-03-11 10:49:20 -07009026 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009027 while (!list_empty_careful(&ctx->iopoll_list)) {
9028 io_iopoll_try_reap_events(ctx);
9029 ret = true;
9030 }
9031 }
9032
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009033 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9034 ret |= io_poll_remove_all(ctx, task, cancel_all);
9035 ret |= io_kill_timeouts(ctx, task, cancel_all);
Pavel Begunkove5dc4802021-06-26 21:40:46 +01009036 if (task)
9037 ret |= io_run_task_work();
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00009038 if (!ret)
9039 break;
9040 cond_resched();
9041 }
9042}
9043
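/*
 * Slow path of io_uring_add_tctx_node(): allocate the task's io_uring state
 * if needed, then link a node for @ctx into both the task's xarray and the
 * ctx's tctx_list so the task can later be found for cancellation.
 */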
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009044static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009045{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009046 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009047 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00009048 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009049
9050 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07009051 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009052 if (unlikely(ret))
9053 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009054 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06009055 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009056 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9057 node = kmalloc(sizeof(*node), GFP_KERNEL);
9058 if (!node)
9059 return -ENOMEM;
9060 node->ctx = ctx;
9061 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06009062
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009063 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9064 node, GFP_KERNEL));
9065 if (ret) {
9066 kfree(node);
9067 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06009068 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009069
9070 mutex_lock(&ctx->uring_lock);
9071 list_add(&node->ctx_node, &ctx->tctx_list);
9072 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009073 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009074 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06009075 return 0;
9076}
9077
9078/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009079 * Note that this task has used io_uring. We use it for cancelation purposes.
9080 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009081static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009082{
9083 struct io_uring_task *tctx = current->io_uring;
9084
9085 if (likely(tctx && tctx->last == ctx))
9086 return 0;
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009087 return __io_uring_add_tctx_node(ctx);
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009088}
9089
9090/*
Jens Axboe0f212202020-09-13 13:09:39 -06009091 * Remove this io_uring ctx -> task mapping.
9092 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009093static void io_uring_del_tctx_node(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06009094{
9095 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009096 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00009097
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00009098 if (!tctx)
9099 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009100 node = xa_erase(&tctx->xa, index);
9101 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00009102 return;
Jens Axboe0f212202020-09-13 13:09:39 -06009103
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009104 WARN_ON_ONCE(current != node->task);
9105 WARN_ON_ONCE(list_empty(&node->ctx_node));
9106
9107 mutex_lock(&node->ctx->uring_lock);
9108 list_del(&node->ctx_node);
9109 mutex_unlock(&node->ctx->uring_lock);
9110
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009111 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009112 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009113 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06009114}
9115
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009116static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009117{
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009118 struct io_wq *wq = tctx->io_wq;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009119 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009120 unsigned long index;
9121
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009122 xa_for_each(&tctx->xa, index, node)
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009123 io_uring_del_tctx_node(index);
Marco Elverb16ef422021-05-27 11:25:48 +02009124 if (wq) {
9125 /*
9126		 * Must be after io_uring_del_tctx_node() (removes nodes under
9127 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9128 */
9129 tctx->io_wq = NULL;
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009130 io_wq_put_and_exit(wq);
Marco Elverb16ef422021-05-27 11:25:48 +02009131 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009132}
9133
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009134static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009135{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009136 if (tracked)
9137 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009138 return percpu_counter_sum(&tctx->inflight);
9139}
9140
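/*
 * Give back this task's cached request refs: subtract them from the
 * inflight counter and drop the corresponding task_struct references.
 */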
Pavel Begunkov09899b12021-06-14 02:36:22 +01009141static void io_uring_drop_tctx_refs(struct task_struct *task)
9142{
9143 struct io_uring_task *tctx = task->io_uring;
9144 unsigned int refs = tctx->cached_refs;
9145
9146 tctx->cached_refs = 0;
9147 percpu_counter_sub(&tctx->inflight, refs);
9148 put_task_struct_many(task, refs);
9149}
9150
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009151/*
9152 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9153 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
9154 */
9155static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009156{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009157 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov734551d2021-04-18 14:52:09 +01009158 struct io_ring_ctx *ctx;
Jens Axboefdaf0832020-10-30 09:37:30 -06009159 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009160 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009161
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009162 WARN_ON_ONCE(sqd && sqd->thread != current);
9163
Palash Oswal6d042ff2021-04-27 18:21:49 +05309164 if (!current->io_uring)
9165 return;
Pavel Begunkov17a91052021-05-23 15:48:39 +01009166 if (tctx->io_wq)
9167 io_wq_exit_start(tctx->io_wq);
9168
Pavel Begunkov09899b12021-06-14 02:36:22 +01009169 io_uring_drop_tctx_refs(current);
Jens Axboefdaf0832020-10-30 09:37:30 -06009170 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -06009171 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009172 /* read completions before cancelations */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009173 inflight = tctx_inflight(tctx, !cancel_all);
Jens Axboed8a6df12020-10-15 16:24:45 -06009174 if (!inflight)
9175 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009176
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009177 if (!sqd) {
9178 struct io_tctx_node *node;
9179 unsigned long index;
9180
9181 xa_for_each(&tctx->xa, index, node) {
9182 /* sqpoll task will cancel all its requests */
9183 if (node->ctx->sq_data)
9184 continue;
9185 io_uring_try_cancel_requests(node->ctx, current,
9186 cancel_all);
9187 }
9188 } else {
9189 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9190 io_uring_try_cancel_requests(ctx, current,
9191 cancel_all);
9192 }
9193
9194 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
Jens Axboe0f212202020-09-13 13:09:39 -06009195 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009196 * If we've seen completions, retry without waiting. This
9197 * avoids a race where a completion comes in before we did
9198 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009199 */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009200 if (inflight == tctx_inflight(tctx, !cancel_all))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009201 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009202 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009203 } while (1);
Jens Axboefdaf0832020-10-30 09:37:30 -06009204 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009205
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009206 io_uring_clean_tctx(tctx);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009207 if (cancel_all) {
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009208	/* for exec, all of current's requests should be gone; kill the tctx */
9209 __io_uring_free(current);
9210 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009211}
9212
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009213void __io_uring_cancel(struct files_struct *files)
9214{
9215 io_uring_cancel_generic(!files, NULL);
9216}
9217
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009218static void *io_uring_validate_mmap_request(struct file *file,
9219 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009220{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009221 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009222 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009223 struct page *page;
9224 void *ptr;
9225
9226 switch (offset) {
9227 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009228 case IORING_OFF_CQ_RING:
9229 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009230 break;
9231 case IORING_OFF_SQES:
9232 ptr = ctx->sq_sqes;
9233 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009234 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009235 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009236 }
9237
9238 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009239 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009240 return ERR_PTR(-EINVAL);
9241
9242 return ptr;
9243}
9244
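/*
 * Illustrative userspace sketch (not part of the kernel source): how an
 * application is expected to map the regions validated above.  Offsets are
 * the IORING_OFF_* constants from <linux/io_uring.h>; "ring_fd" and "p" are
 * assumed to come from a prior io_uring_setup() call.
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	void *cq = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL,
 *			p.sq_entries * sizeof(struct io_uring_sqe),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			ring_fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP (advertised in p->features below), the SQ and
 * CQ rings can instead be covered by a single mmap() of max(sq_sz, cq_sz).
 */
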
9245#ifdef CONFIG_MMU
9246
9247static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9248{
9249 size_t sz = vma->vm_end - vma->vm_start;
9250 unsigned long pfn;
9251 void *ptr;
9252
9253 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9254 if (IS_ERR(ptr))
9255 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009256
9257 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9258 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9259}
9260
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009261#else /* !CONFIG_MMU */
9262
9263static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9264{
9265 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9266}
9267
9268static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9269{
9270 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9271}
9272
9273static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9274 unsigned long addr, unsigned long len,
9275 unsigned long pgoff, unsigned long flags)
9276{
9277 void *ptr;
9278
9279 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9280 if (IS_ERR(ptr))
9281 return PTR_ERR(ptr);
9282
9283 return (unsigned long) ptr;
9284}
9285
9286#endif /* !CONFIG_MMU */
9287
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009288static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009289{
9290 DEFINE_WAIT(wait);
9291
9292 do {
9293 if (!io_sqring_full(ctx))
9294 break;
Jens Axboe90554202020-09-03 12:12:41 -06009295 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9296
9297 if (!io_sqring_full(ctx))
9298 break;
Jens Axboe90554202020-09-03 12:12:41 -06009299 schedule();
9300 } while (!signal_pending(current));
9301
9302 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009303 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009304}
9305
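/*
 * Illustrative userspace sketch (not part of the kernel source): with
 * IORING_SETUP_SQPOLL, an application that finds the SQ ring full can ask
 * the kernel to block until space frees up instead of spinning; this is the
 * path that ends up in io_sqpoll_wait_sq() above.  "ring_fd" is assumed to
 * be an SQPOLL io_uring fd, and the raw syscall is used for clarity.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			  IORING_ENTER_SQ_WAIT, NULL, 0);
 *	if (ret < 0)
 *		perror("io_uring_enter");
 */
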
Hao Xuc73ebb62020-11-03 10:54:37 +08009306static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9307 struct __kernel_timespec __user **ts,
9308 const sigset_t __user **sig)
9309{
9310 struct io_uring_getevents_arg arg;
9311
9312 /*
9313 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9314 * is just a pointer to the sigset_t.
9315 */
9316 if (!(flags & IORING_ENTER_EXT_ARG)) {
9317 *sig = (const sigset_t __user *) argp;
9318 *ts = NULL;
9319 return 0;
9320 }
9321
9322 /*
9323 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9324 * timespec and sigset_t pointers if good.
9325 */
9326 if (*argsz != sizeof(arg))
9327 return -EINVAL;
9328 if (copy_from_user(&arg, argp, sizeof(arg)))
9329 return -EFAULT;
9330 *sig = u64_to_user_ptr(arg.sigmask);
9331 *argsz = arg.sigmask_sz;
9332 *ts = u64_to_user_ptr(arg.ts);
9333 return 0;
9334}
9335
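/*
 * Illustrative userspace sketch (not part of the kernel source): waiting for
 * completions with both a signal mask and a relative timeout through
 * IORING_ENTER_EXT_ARG, matching the layout io_get_ext_arg() expects.
 * "ring_fd" is assumed to be a ring whose setup reported IORING_FEAT_EXT_ARG.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(unsigned long)&mask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(unsigned long)&ts,
 *	};
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *			  IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *			  &arg, sizeof(arg));
 *
 * Without IORING_ENTER_EXT_ARG, the fifth syscall argument is a plain
 * sigset_t pointer and the sixth is its size, as the comments above note.
 */
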
Jens Axboe2b188cc2019-01-07 10:46:33 -07009336SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009337 u32, min_complete, u32, flags, const void __user *, argp,
9338 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009339{
9340 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009341 int submitted = 0;
9342 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009343 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009344
Jens Axboe4c6e2772020-07-01 11:29:10 -06009345 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009346
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009347 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9348 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009349 return -EINVAL;
9350
9351 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009352 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009353 return -EBADF;
9354
9355 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009356 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009357 goto out_fput;
9358
9359 ret = -ENXIO;
9360 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009361 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009362 goto out_fput;
9363
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009364 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009365 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009366 goto out;
9367
Jens Axboe6c271ce2019-01-10 11:22:30 -07009368 /*
9369 * For SQ polling, the thread will do all submissions and completions.
9370 * Just return the requested submit count, and wake the thread if
9371 * we were asked to.
9372 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009373 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009374 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009375 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009376
Jens Axboe21f96522021-08-14 09:04:40 -06009377 if (unlikely(ctx->sq_data->thread == NULL)) {
9378 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009379 goto out;
Jens Axboe21f96522021-08-14 09:04:40 -06009380 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009381 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009382 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009383 if (flags & IORING_ENTER_SQ_WAIT) {
9384 ret = io_sqpoll_wait_sq(ctx);
9385 if (ret)
9386 goto out;
9387 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009388 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009389 } else if (to_submit) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009390 ret = io_uring_add_tctx_node(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009391 if (unlikely(ret))
9392 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009393 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009394 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009395 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009396
9397 if (submitted != to_submit)
9398 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009399 }
9400 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009401 const sigset_t __user *sig;
9402 struct __kernel_timespec __user *ts;
9403
9404 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9405 if (unlikely(ret))
9406 goto out;
9407
Jens Axboe2b188cc2019-01-07 10:46:33 -07009408 min_complete = min(min_complete, ctx->cq_entries);
9409
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009410 /*
9411		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, the
9412		 * application does not need to poll for completions itself; it
9413		 * can rely on io_sq_thread to do the polling, which reduces cpu
9414		 * usage and uring_lock contention.
9415 */
9416 if (ctx->flags & IORING_SETUP_IOPOLL &&
9417 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009418 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009419 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009420 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009421 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009422 }
9423
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009424out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009425 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009426out_fput:
9427 fdput(f);
9428 return submitted ? submitted : ret;
9429}
9430
Tobias Klauserbebdb652020-02-26 18:38:32 +01009431#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009432static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9433 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009434{
Jens Axboe87ce9552020-01-30 08:25:34 -07009435 struct user_namespace *uns = seq_user_ns(m);
9436 struct group_info *gi;
9437 kernel_cap_t cap;
9438 unsigned __capi;
9439 int g;
9440
9441 seq_printf(m, "%5d\n", id);
9442 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9443 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9444 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9445 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9446 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9447 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9448 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9449 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9450 seq_puts(m, "\n\tGroups:\t");
9451 gi = cred->group_info;
9452 for (g = 0; g < gi->ngroups; g++) {
9453 seq_put_decimal_ull(m, g ? " " : "",
9454 from_kgid_munged(uns, gi->gid[g]));
9455 }
9456 seq_puts(m, "\n\tCapEff:\t");
9457 cap = cred->cap_effective;
9458 CAP_FOR_EACH_U32(__capi)
9459 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9460 seq_putc(m, '\n');
9461 return 0;
9462}
9463
9464static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9465{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009466 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009467 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009468 int i;
9469
Jens Axboefad8e0d2020-09-28 08:57:48 -06009470 /*
9471 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9472	 * since the fdinfo case grabs it in the opposite order from normal use
9473	 * cases. If we fail to get the lock, we just don't iterate any
9474 * structures that could be going away outside the io_uring mutex.
9475 */
9476 has_lock = mutex_trylock(&ctx->uring_lock);
9477
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009478 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009479 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009480 if (!sq->thread)
9481 sq = NULL;
9482 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009483
9484 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9485 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009486 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009487 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07009488 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009489
Jens Axboe87ce9552020-01-30 08:25:34 -07009490 if (f)
9491 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9492 else
9493 seq_printf(m, "%5u: <none>\n", i);
9494 }
9495 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009496 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009497 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +01009498 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -07009499
Pavel Begunkov4751f532021-04-01 15:43:55 +01009500 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -07009501 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009502 if (has_lock && !xa_empty(&ctx->personalities)) {
9503 unsigned long index;
9504 const struct cred *cred;
9505
Jens Axboe87ce9552020-01-30 08:25:34 -07009506 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009507 xa_for_each(&ctx->personalities, index, cred)
9508 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009509 }
Jens Axboed7718a92020-02-14 22:23:12 -07009510 seq_printf(m, "PollList:\n");
9511 spin_lock_irq(&ctx->completion_lock);
9512 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9513 struct hlist_head *list = &ctx->cancel_hash[i];
9514 struct io_kiocb *req;
9515
9516 hlist_for_each_entry(req, list, hash_node)
9517 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9518 req->task->task_works != NULL);
9519 }
9520 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009521 if (has_lock)
9522 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009523}
9524
9525static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9526{
9527 struct io_ring_ctx *ctx = f->private_data;
9528
9529 if (percpu_ref_tryget(&ctx->refs)) {
9530 __io_uring_show_fdinfo(ctx, m);
9531 percpu_ref_put(&ctx->refs);
9532 }
9533}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009534#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009535
Jens Axboe2b188cc2019-01-07 10:46:33 -07009536static const struct file_operations io_uring_fops = {
9537 .release = io_uring_release,
9538 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009539#ifndef CONFIG_MMU
9540 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9541 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9542#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009543 .poll = io_uring_poll,
9544 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009545#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009546 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009547#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009548};
9549
9550static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9551 struct io_uring_params *p)
9552{
Hristo Venev75b28af2019-08-26 17:23:46 +00009553 struct io_rings *rings;
9554 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009555
Jens Axboebd740482020-08-05 12:58:23 -06009556 /* make sure these are sane, as we already accounted them */
9557 ctx->sq_entries = p->sq_entries;
9558 ctx->cq_entries = p->cq_entries;
9559
Hristo Venev75b28af2019-08-26 17:23:46 +00009560 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9561 if (size == SIZE_MAX)
9562 return -EOVERFLOW;
9563
9564 rings = io_mem_alloc(size);
9565 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009566 return -ENOMEM;
9567
Hristo Venev75b28af2019-08-26 17:23:46 +00009568 ctx->rings = rings;
9569 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9570 rings->sq_ring_mask = p->sq_entries - 1;
9571 rings->cq_ring_mask = p->cq_entries - 1;
9572 rings->sq_ring_entries = p->sq_entries;
9573 rings->cq_ring_entries = p->cq_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009574
9575 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009576 if (size == SIZE_MAX) {
9577 io_mem_free(ctx->rings);
9578 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009579 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009580 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009581
9582 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009583 if (!ctx->sq_sqes) {
9584 io_mem_free(ctx->rings);
9585 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009586 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009587 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009588
Jens Axboe2b188cc2019-01-07 10:46:33 -07009589 return 0;
9590}
9591
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009592static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9593{
9594 int ret, fd;
9595
9596 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9597 if (fd < 0)
9598 return fd;
9599
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009600 ret = io_uring_add_tctx_node(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009601 if (ret) {
9602 put_unused_fd(fd);
9603 return ret;
9604 }
9605 fd_install(fd, file);
9606 return fd;
9607}
9608
Jens Axboe2b188cc2019-01-07 10:46:33 -07009609/*
9610 * Allocate an anonymous fd; this is what constitutes the application-
9611 * visible backing of an io_uring instance. The application mmaps this
9612 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9613 * we have to tie this fd to a socket for file garbage collection purposes.
9614 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009615static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009616{
9617 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009618#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009619 int ret;
9620
Jens Axboe2b188cc2019-01-07 10:46:33 -07009621 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9622 &ctx->ring_sock);
9623 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009624 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009625#endif
9626
Jens Axboe2b188cc2019-01-07 10:46:33 -07009627 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9628 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009629#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009630 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009631 sock_release(ctx->ring_sock);
9632 ctx->ring_sock = NULL;
9633 } else {
9634 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009635 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009636#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009637 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009638}
9639
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009640static int io_uring_create(unsigned entries, struct io_uring_params *p,
9641 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009642{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009643 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009644 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009645 int ret;
9646
Jens Axboe8110c1a2019-12-28 15:39:54 -07009647 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009648 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009649 if (entries > IORING_MAX_ENTRIES) {
9650 if (!(p->flags & IORING_SETUP_CLAMP))
9651 return -EINVAL;
9652 entries = IORING_MAX_ENTRIES;
9653 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009654
9655 /*
9656 * Use twice as many entries for the CQ ring. It's possible for the
9657 * application to drive a higher depth than the size of the SQ ring,
9658 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009659	 * some flexibility in overcommitting. If the application has
9660 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9661 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009662 */
9663 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009664 if (p->flags & IORING_SETUP_CQSIZE) {
9665 /*
9666 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9667 * to a power-of-two, if it isn't already. We do NOT impose
9668 * any cq vs sq ring sizing.
9669 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009670 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009671 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009672 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9673 if (!(p->flags & IORING_SETUP_CLAMP))
9674 return -EINVAL;
9675 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9676 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009677 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9678 if (p->cq_entries < p->sq_entries)
9679 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009680 } else {
9681 p->cq_entries = 2 * p->sq_entries;
9682 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009683
Jens Axboe2b188cc2019-01-07 10:46:33 -07009684 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009685 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009686 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009687 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009688 if (!capable(CAP_IPC_LOCK))
9689 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009690
9691 /*
9692 * This is just grabbed for accounting purposes. When a process exits,
9693 * the mm is exited and dropped before the files, hence we need to hang
9694 * on to this mm purely for the purposes of being able to unaccount
9695 * memory (locked/pinned vm). It's not used for anything else.
9696 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009697 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009698 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009699
Jens Axboe2b188cc2019-01-07 10:46:33 -07009700 ret = io_allocate_scq_urings(ctx, p);
9701 if (ret)
9702 goto err;
9703
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009704 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009705 if (ret)
9706 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009707 /* always set a rsrc node */
Pavel Begunkov47b228c2021-04-29 11:46:48 +01009708 ret = io_rsrc_node_switch_start(ctx);
9709 if (ret)
9710 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009711 io_rsrc_node_switch(ctx, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009712
Jens Axboe2b188cc2019-01-07 10:46:33 -07009713 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009714 p->sq_off.head = offsetof(struct io_rings, sq.head);
9715 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9716 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9717 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9718 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9719 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9720 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009721
9722 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009723 p->cq_off.head = offsetof(struct io_rings, cq.head);
9724 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9725 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9726 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9727 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9728 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009729 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009730
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009731 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9732 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009733 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009734 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Pavel Begunkov96905572021-06-10 16:37:38 +01009735 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
9736 IORING_FEAT_RSRC_TAGS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009737
9738 if (copy_to_user(params, p, sizeof(*p))) {
9739 ret = -EFAULT;
9740 goto err;
9741 }
Jens Axboed1719f72020-07-30 13:43:53 -06009742
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009743 file = io_uring_get_file(ctx);
9744 if (IS_ERR(file)) {
9745 ret = PTR_ERR(file);
9746 goto err;
9747 }
9748
Jens Axboed1719f72020-07-30 13:43:53 -06009749 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009750 * Install ring fd as the very last thing, so we don't risk someone
9751 * having closed it before we finish setup
9752 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009753 ret = io_uring_install_fd(ctx, file);
9754 if (ret < 0) {
9755 /* fput will clean it up */
9756 fput(file);
9757 return ret;
9758 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009759
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009760 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009761 return ret;
9762err:
9763 io_ring_ctx_wait_and_kill(ctx);
9764 return ret;
9765}
9766
9767/*
9768 * Sets up an io_uring context and returns the fd. The application asks for a
9769 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9770 * params structure passed in.
9771 */
9772static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9773{
9774 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009775 int i;
9776
9777 if (copy_from_user(&p, params, sizeof(p)))
9778 return -EFAULT;
9779 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9780 if (p.resv[i])
9781 return -EINVAL;
9782 }
9783
Jens Axboe6c271ce2019-01-10 11:22:30 -07009784 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009785 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009786 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9787 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009788 return -EINVAL;
9789
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009790 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009791}
9792
9793SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9794 struct io_uring_params __user *, params)
9795{
9796 return io_uring_setup(entries, params);
9797}
9798
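/*
 * Illustrative userspace sketch (not part of the kernel source): requesting
 * a CQ ring larger than the default 2 * sq_entries via IORING_SETUP_CQSIZE.
 * io_uring_create() above rounds both values up to a power of two and, with
 * IORING_SETUP_CLAMP, silently caps oversized requests instead of failing.
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP,
 *		.cq_entries	= 4096,
 *	};
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 * On success, p.sq_entries and p.cq_entries hold the ring sizes actually
 * allocated.
 */
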
Jens Axboe66f4af92020-01-16 15:36:52 -07009799static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9800{
9801 struct io_uring_probe *p;
9802 size_t size;
9803 int i, ret;
9804
9805 size = struct_size(p, ops, nr_args);
9806 if (size == SIZE_MAX)
9807 return -EOVERFLOW;
9808 p = kzalloc(size, GFP_KERNEL);
9809 if (!p)
9810 return -ENOMEM;
9811
9812 ret = -EFAULT;
9813 if (copy_from_user(p, arg, size))
9814 goto out;
9815 ret = -EINVAL;
9816 if (memchr_inv(p, 0, size))
9817 goto out;
9818
9819 p->last_op = IORING_OP_LAST - 1;
9820 if (nr_args > IORING_OP_LAST)
9821 nr_args = IORING_OP_LAST;
9822
9823 for (i = 0; i < nr_args; i++) {
9824 p->ops[i].op = i;
9825 if (!io_op_defs[i].not_supported)
9826 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9827 }
9828 p->ops_len = i;
9829
9830 ret = 0;
9831 if (copy_to_user(arg, p, size))
9832 ret = -EFAULT;
9833out:
9834 kfree(p);
9835 return ret;
9836}
9837
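/*
 * Illustrative userspace sketch (not part of the kernel source): querying
 * which opcodes this kernel supports through IORING_REGISTER_PROBE, which is
 * handled by io_probe() above.  "ring_fd" is assumed to be an existing ring.
 *
 *	struct io_uring_probe *probe;
 *	size_t len = sizeof(*probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *
 *	probe = calloc(1, len);
 *	if (probe && syscall(__NR_io_uring_register, ring_fd,
 *			     IORING_REGISTER_PROBE, probe, IORING_OP_LAST) == 0) {
 *		for (int i = 0; i < probe->ops_len; i++)
 *			if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
 *				printf("opcode %u supported\n", probe->ops[i].op);
 *	}
 *	free(probe);
 */
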
Jens Axboe071698e2020-01-28 10:04:42 -07009838static int io_register_personality(struct io_ring_ctx *ctx)
9839{
Jens Axboe4379bf82021-02-15 13:40:22 -07009840 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009841 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009842 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009843
Jens Axboe4379bf82021-02-15 13:40:22 -07009844 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009845
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009846 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9847 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
Jens Axboea30f8952021-08-20 14:53:59 -06009848 if (ret < 0) {
9849 put_cred(creds);
9850 return ret;
9851 }
9852 return id;
Jens Axboe071698e2020-01-28 10:04:42 -07009853}
9854
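/*
 * Illustrative userspace sketch (not part of the kernel source): capturing
 * the calling task's credentials as a personality with the helper above.
 * The returned id is what an application later stores in sqe->personality;
 * leaving that field at 0 means "use the submitting task's credentials".
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	if (id >= 0)
 *		sqe->personality = id;
 */
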
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009855static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9856 unsigned int nr_args)
9857{
9858 struct io_uring_restriction *res;
9859 size_t size;
9860 int i, ret;
9861
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009862 /* Restrictions allowed only if rings started disabled */
9863 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9864 return -EBADFD;
9865
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009866 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009867 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009868 return -EBUSY;
9869
9870 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9871 return -EINVAL;
9872
9873 size = array_size(nr_args, sizeof(*res));
9874 if (size == SIZE_MAX)
9875 return -EOVERFLOW;
9876
9877 res = memdup_user(arg, size);
9878 if (IS_ERR(res))
9879 return PTR_ERR(res);
9880
9881 ret = 0;
9882
9883 for (i = 0; i < nr_args; i++) {
9884 switch (res[i].opcode) {
9885 case IORING_RESTRICTION_REGISTER_OP:
9886 if (res[i].register_op >= IORING_REGISTER_LAST) {
9887 ret = -EINVAL;
9888 goto out;
9889 }
9890
9891 __set_bit(res[i].register_op,
9892 ctx->restrictions.register_op);
9893 break;
9894 case IORING_RESTRICTION_SQE_OP:
9895 if (res[i].sqe_op >= IORING_OP_LAST) {
9896 ret = -EINVAL;
9897 goto out;
9898 }
9899
9900 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9901 break;
9902 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9903 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9904 break;
9905 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9906 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9907 break;
9908 default:
9909 ret = -EINVAL;
9910 goto out;
9911 }
9912 }
9913
9914out:
9915 /* Reset all restrictions if an error happened */
9916 if (ret != 0)
9917 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9918 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009919 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009920
9921 kfree(res);
9922 return ret;
9923}
9924
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009925static int io_register_enable_rings(struct io_ring_ctx *ctx)
9926{
9927 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9928 return -EBADFD;
9929
9930 if (ctx->restrictions.registered)
9931 ctx->restricted = 1;
9932
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009933 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9934 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9935 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009936 return 0;
9937}
9938
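/*
 * Illustrative userspace sketch (not part of the kernel source): the flow the
 * two helpers above are designed for.  A privileged task creates the ring
 * with IORING_SETUP_R_DISABLED, registers restrictions while it is still
 * disabled, and only then enables it (e.g. after handing the fd to a less
 * trusted task).
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITEV },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * Once enabled, SQEs with opcodes outside the registered set are failed with
 * -EACCES.
 */
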
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009939static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009940 struct io_uring_rsrc_update2 *up,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009941 unsigned nr_args)
9942{
9943 __u32 tmp;
9944 int err;
9945
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009946 if (up->resv)
9947 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009948 if (check_add_overflow(up->offset, nr_args, &tmp))
9949 return -EOVERFLOW;
9950 err = io_rsrc_node_switch_start(ctx);
9951 if (err)
9952 return err;
9953
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009954 switch (type) {
9955 case IORING_RSRC_FILE:
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009956 return __io_sqe_files_update(ctx, up, nr_args);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009957 case IORING_RSRC_BUFFER:
9958 return __io_sqe_buffers_update(ctx, up, nr_args);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009959 }
9960 return -EINVAL;
9961}
9962
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009963static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
9964 unsigned nr_args)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009965{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009966 struct io_uring_rsrc_update2 up;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009967
9968 if (!nr_args)
9969 return -EINVAL;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009970 memset(&up, 0, sizeof(up));
9971 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
9972 return -EFAULT;
9973 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
9974}
9975
9976static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009977 unsigned size, unsigned type)
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009978{
9979 struct io_uring_rsrc_update2 up;
9980
9981 if (size != sizeof(up))
9982 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009983 if (copy_from_user(&up, arg, sizeof(up)))
9984 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +01009985 if (!up.nr || up.resv)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009986 return -EINVAL;
Pavel Begunkov992da012021-06-10 16:37:37 +01009987 return __io_register_rsrc_update(ctx, type, &up, up.nr);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009988}
9989
Pavel Begunkov792e3582021-04-25 14:32:21 +01009990static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009991 unsigned int size, unsigned int type)
Pavel Begunkov792e3582021-04-25 14:32:21 +01009992{
9993 struct io_uring_rsrc_register rr;
9994
9995 /* keep it extendible */
9996 if (size != sizeof(rr))
9997 return -EINVAL;
9998
9999 memset(&rr, 0, sizeof(rr));
10000 if (copy_from_user(&rr, arg, size))
10001 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +010010002 if (!rr.nr || rr.resv || rr.resv2)
Pavel Begunkov792e3582021-04-25 14:32:21 +010010003 return -EINVAL;
10004
Pavel Begunkov992da012021-06-10 16:37:37 +010010005 switch (type) {
Pavel Begunkov792e3582021-04-25 14:32:21 +010010006 case IORING_RSRC_FILE:
10007 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10008 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010009 case IORING_RSRC_BUFFER:
10010 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
10011 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov792e3582021-04-25 14:32:21 +010010012 }
10013 return -EINVAL;
10014}
10015
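/*
 * Illustrative userspace sketch (not part of the kernel source): registering
 * a fixed file table through the extended IORING_REGISTER_FILES2 interface
 * handled above.  "fd_a" and "fd_b" are assumed to be open fds; a non-zero
 * tag causes a CQE carrying that value to be posted once the corresponding
 * slot is released.
 *
 *	int fds[2] = { fd_a, fd_b };
 *	__u64 tags[2] = { 0xa, 0xb };
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 2,
 *		.data	= (__u64)(unsigned long)fds,
 *		.tags	= (__u64)(unsigned long)tags,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 */
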
Jens Axboefe764212021-06-17 10:19:54 -060010016static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
10017 unsigned len)
10018{
10019 struct io_uring_task *tctx = current->io_uring;
10020 cpumask_var_t new_mask;
10021 int ret;
10022
10023 if (!tctx || !tctx->io_wq)
10024 return -EINVAL;
10025
10026 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
10027 return -ENOMEM;
10028
10029 cpumask_clear(new_mask);
10030 if (len > cpumask_size())
10031 len = cpumask_size();
10032
10033 if (copy_from_user(new_mask, arg, len)) {
10034 free_cpumask_var(new_mask);
10035 return -EFAULT;
10036 }
10037
10038 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10039 free_cpumask_var(new_mask);
10040 return ret;
10041}
10042
10043static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
10044{
10045 struct io_uring_task *tctx = current->io_uring;
10046
10047 if (!tctx || !tctx->io_wq)
10048 return -EINVAL;
10049
10050 return io_wq_cpu_affinity(tctx->io_wq, NULL);
10051}
10052
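/*
 * Illustrative userspace sketch (not part of the kernel source): pinning the
 * calling task's io-wq workers to CPUs 0-1 via the affinity helpers above.
 * A glibc cpu_set_t may be larger than the kernel cpumask; the excess bytes
 * are simply not copied.
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 *
 * IORING_UNREGISTER_IOWQ_AFF (arg and nr_args must be zero) reverts the
 * workers to the default affinity.
 */
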
Jens Axboe071698e2020-01-28 10:04:42 -070010053static bool io_register_op_must_quiesce(int op)
10054{
10055 switch (op) {
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +010010056 case IORING_REGISTER_BUFFERS:
10057 case IORING_UNREGISTER_BUFFERS:
Pavel Begunkovf4f7d212021-04-01 15:44:02 +010010058 case IORING_REGISTER_FILES:
Jens Axboe071698e2020-01-28 10:04:42 -070010059 case IORING_UNREGISTER_FILES:
10060 case IORING_REGISTER_FILES_UPDATE:
10061 case IORING_REGISTER_PROBE:
10062 case IORING_REGISTER_PERSONALITY:
10063 case IORING_UNREGISTER_PERSONALITY:
Pavel Begunkov992da012021-06-10 16:37:37 +010010064 case IORING_REGISTER_FILES2:
10065 case IORING_REGISTER_FILES_UPDATE2:
10066 case IORING_REGISTER_BUFFERS2:
10067 case IORING_REGISTER_BUFFERS_UPDATE:
Jens Axboefe764212021-06-17 10:19:54 -060010068 case IORING_REGISTER_IOWQ_AFF:
10069 case IORING_UNREGISTER_IOWQ_AFF:
Jens Axboe071698e2020-01-28 10:04:42 -070010070 return false;
10071 default:
10072 return true;
10073 }
10074}
10075
Pavel Begunkove73c5c72021-08-09 13:04:12 +010010076static int io_ctx_quiesce(struct io_ring_ctx *ctx)
10077{
10078 long ret;
10079
10080 percpu_ref_kill(&ctx->refs);
10081
10082 /*
10083 * Drop uring mutex before waiting for references to exit. If another
10084 * thread is currently inside io_uring_enter() it might need to grab the
10085 * uring_lock to make progress. If we hold it here across the drain
10086 * wait, then we can deadlock. It's safe to drop the mutex here, since
10087 * no new references will come in after we've killed the percpu ref.
10088 */
10089 mutex_unlock(&ctx->uring_lock);
10090 do {
10091 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10092 if (!ret)
10093 break;
10094 ret = io_run_task_work_sig();
10095 } while (ret >= 0);
10096 mutex_lock(&ctx->uring_lock);
10097
10098 if (ret)
10099 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10100 return ret;
10101}
10102
Jens Axboeedafcce2019-01-09 09:16:05 -070010103static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10104 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -060010105 __releases(ctx->uring_lock)
10106 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -070010107{
10108 int ret;
10109
Jens Axboe35fa71a2019-04-22 10:23:23 -060010110 /*
10111	 * We're inside the ring mutex; if the ref is already dying, then
10112 * someone else killed the ctx or is already going through
10113 * io_uring_register().
10114 */
10115 if (percpu_ref_is_dying(&ctx->refs))
10116 return -ENXIO;
10117
Pavel Begunkov75c40212021-04-15 13:07:40 +010010118 if (ctx->restricted) {
10119 if (opcode >= IORING_REGISTER_LAST)
10120 return -EINVAL;
10121 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10122 if (!test_bit(opcode, ctx->restrictions.register_op))
10123 return -EACCES;
10124 }
10125
Jens Axboe071698e2020-01-28 10:04:42 -070010126 if (io_register_op_must_quiesce(opcode)) {
Pavel Begunkove73c5c72021-08-09 13:04:12 +010010127 ret = io_ctx_quiesce(ctx);
10128 if (ret)
Pavel Begunkovf70865d2021-04-11 01:46:40 +010010129 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -070010130 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010131
10132 switch (opcode) {
10133 case IORING_REGISTER_BUFFERS:
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010134 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -070010135 break;
10136 case IORING_UNREGISTER_BUFFERS:
10137 ret = -EINVAL;
10138 if (arg || nr_args)
10139 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010140 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -070010141 break;
Jens Axboe6b063142019-01-10 22:13:58 -070010142 case IORING_REGISTER_FILES:
Pavel Begunkov792e3582021-04-25 14:32:21 +010010143 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
Jens Axboe6b063142019-01-10 22:13:58 -070010144 break;
10145 case IORING_UNREGISTER_FILES:
10146 ret = -EINVAL;
10147 if (arg || nr_args)
10148 break;
10149 ret = io_sqe_files_unregister(ctx);
10150 break;
Jens Axboec3a31e62019-10-03 13:59:56 -060010151 case IORING_REGISTER_FILES_UPDATE:
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010152 ret = io_register_files_update(ctx, arg, nr_args);
Jens Axboec3a31e62019-10-03 13:59:56 -060010153 break;
Jens Axboe9b402842019-04-11 11:45:41 -060010154 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -070010155 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -060010156 ret = -EINVAL;
10157 if (nr_args != 1)
10158 break;
10159 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -070010160 if (ret)
10161 break;
10162 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10163 ctx->eventfd_async = 1;
10164 else
10165 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -060010166 break;
10167 case IORING_UNREGISTER_EVENTFD:
10168 ret = -EINVAL;
10169 if (arg || nr_args)
10170 break;
10171 ret = io_eventfd_unregister(ctx);
10172 break;
Jens Axboe66f4af92020-01-16 15:36:52 -070010173 case IORING_REGISTER_PROBE:
10174 ret = -EINVAL;
10175 if (!arg || nr_args > 256)
10176 break;
10177 ret = io_probe(ctx, arg, nr_args);
10178 break;
Jens Axboe071698e2020-01-28 10:04:42 -070010179 case IORING_REGISTER_PERSONALITY:
10180 ret = -EINVAL;
10181 if (arg || nr_args)
10182 break;
10183 ret = io_register_personality(ctx);
10184 break;
10185 case IORING_UNREGISTER_PERSONALITY:
10186 ret = -EINVAL;
10187 if (arg)
10188 break;
10189 ret = io_unregister_personality(ctx, nr_args);
10190 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010191 case IORING_REGISTER_ENABLE_RINGS:
10192 ret = -EINVAL;
10193 if (arg || nr_args)
10194 break;
10195 ret = io_register_enable_rings(ctx);
10196 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010197 case IORING_REGISTER_RESTRICTIONS:
10198 ret = io_register_restrictions(ctx, arg, nr_args);
10199 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010200 case IORING_REGISTER_FILES2:
10201 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
Pavel Begunkov792e3582021-04-25 14:32:21 +010010202 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010203 case IORING_REGISTER_FILES_UPDATE2:
10204 ret = io_register_rsrc_update(ctx, arg, nr_args,
10205 IORING_RSRC_FILE);
10206 break;
10207 case IORING_REGISTER_BUFFERS2:
10208 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
10209 break;
10210 case IORING_REGISTER_BUFFERS_UPDATE:
10211 ret = io_register_rsrc_update(ctx, arg, nr_args,
10212 IORING_RSRC_BUFFER);
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010213 break;
Jens Axboefe764212021-06-17 10:19:54 -060010214 case IORING_REGISTER_IOWQ_AFF:
10215 ret = -EINVAL;
10216 if (!arg || !nr_args)
10217 break;
10218 ret = io_register_iowq_aff(ctx, arg, nr_args);
10219 break;
10220 case IORING_UNREGISTER_IOWQ_AFF:
10221 ret = -EINVAL;
10222 if (arg || nr_args)
10223 break;
10224 ret = io_unregister_iowq_aff(ctx);
10225 break;
Jens Axboeedafcce2019-01-09 09:16:05 -070010226 default:
10227 ret = -EINVAL;
10228 break;
10229 }
10230
Jens Axboe071698e2020-01-28 10:04:42 -070010231 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010232 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -070010233 percpu_ref_reinit(&ctx->refs);
Jens Axboe0f158b42020-05-14 17:18:39 -060010234 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010235 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010236 return ret;
10237}
10238
10239SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10240 void __user *, arg, unsigned int, nr_args)
10241{
10242 struct io_ring_ctx *ctx;
10243 long ret = -EBADF;
10244 struct fd f;
10245
10246 f = fdget(fd);
10247 if (!f.file)
10248 return -EBADF;
10249
10250 ret = -EOPNOTSUPP;
10251 if (f.file->f_op != &io_uring_fops)
10252 goto out_fput;
10253
10254 ctx = f.file->private_data;
10255
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +000010256 io_run_task_work();
10257
Jens Axboeedafcce2019-01-09 09:16:05 -070010258 mutex_lock(&ctx->uring_lock);
10259 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10260 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010261 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10262 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010263out_fput:
10264 fdput(f);
10265 return ret;
10266}
10267
Jens Axboe2b188cc2019-01-07 10:46:33 -070010268static int __init io_uring_init(void)
10269{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010270#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10271 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10272 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10273} while (0)
10274
10275#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10276 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10277 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10278 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10279 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10280 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10281 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10282 BUILD_BUG_SQE_ELEM(8, __u64, off);
10283 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10284 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010285 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010286 BUILD_BUG_SQE_ELEM(24, __u32, len);
10287 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10288 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10289 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10290 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010291 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10292 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010293 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10294 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10295 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10296 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10297 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10298 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10299 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10300 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010301 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010302 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10303 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010304 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010305 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010306 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010307
Pavel Begunkovb0d658ec2021-04-27 16:13:53 +010010308 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10309 sizeof(struct io_uring_rsrc_update));
10310 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10311 sizeof(struct io_uring_rsrc_update2));
10312 /* should fit into one byte */
10313 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10314
Jens Axboed3656342019-12-18 09:50:26 -070010315 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -070010316 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010317
Jens Axboe91f245d2021-02-09 13:48:50 -070010318 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10319 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010320 return 0;
10321};
10322__initcall(io_uring_init);