// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
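
/*
 * Illustrative, userspace-side sketch of the CQ-side pairing described
 * above; this is not kernel code and not a verbatim copy of liburing.
 * It assumes C11 <stdatomic.h> for the acquire/release accesses and
 * liburing-style ring pointers (khead, ktail, kring_mask, cqes), with
 * handle_cqe() being a hypothetical application callback:
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = atomic_load_explicit((_Atomic unsigned *)cq->ktail,
 *					     memory_order_acquire);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *
 *		handle_cqe(cqe);	// consume the entry before publishing head
 *		head++;
 *	}
 *	// the release on head pairs with the kernel-side ordering in io_get_cqe()
 *	atomic_store_explicit((_Atomic unsigned *)cq->khead, head,
 *			      memory_order_release);
 */
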
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
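
/*
 * Illustrative, userspace-side sketch of how one SQE is published against
 * the shared ring fields above; not kernel code. It assumes C11 atomics
 * and liburing-style pointers into the mmap'ed SQ ring (ktail, kring_mask,
 * karray for the sq_array indices, sqes), with fill_sqe() being a
 * hypothetical helper that populates the io_uring_sqe:
 *
 *	unsigned tail = *sq->ktail;
 *	unsigned idx  = tail & *sq->kring_mask;
 *
 *	fill_sqe(&sq->sqes[idx]);
 *	sq->karray[idx] = idx;		// publish the SQE index via sq_array
 *	// the release on the tail pairs with the kernel's smp_load_acquire()
 *	atomic_store_explicit((_Atomic unsigned *)sq->ktail, tail + 1,
 *			      memory_order_release);
 */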

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_file_table {
	/* two level table */
	struct io_fixed_file **files;
};

struct io_rsrc_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct io_rsrc_data		*rsrc_data;
	struct llist_node		llist;
	bool				done;
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	rsrc_put_fn			*do_put;
	atomic_t			refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		struct list_head	defer_list;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;
		struct xarray		io_buffers;
		struct xarray		personalities;
		u32			pers_next;
		unsigned		sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq_overflow;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct eventfd_ctx	*cq_ev_fd;
		struct wait_queue_head	poll_wait;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
		atomic_t		cq_timeouts;
		struct fasync_struct	*cq_fasync;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
	} ____cacheline_aligned_in_smp;

	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct {
		struct io_rsrc_node		*rsrc_backup_node;
		struct io_mapped_ubuf		*dummy_ubuf;
		struct io_rsrc_data		*file_data;
		struct io_rsrc_data		*buf_data;

		struct delayed_work		rsrc_put_work;
		struct llist_head		rsrc_put_llist;
		struct list_head		rsrc_ref_list;
		spinlock_t			rsrc_ref_lock;
	};

	/* Keep this last, we don't need it for the fast path */
	struct {
		#if defined(CONFIG_UNIX)
			struct socket		*ring_sock;
		#endif
		/* hashed buffered write serialization */
		struct io_wq_hash		*hash_map;

		/* Only used for accounting purposes */
		struct user_struct		*user;
		struct mm_struct		*mm_account;

		/* ctx exit and cancelation */
		struct llist_head		fallback_llist;
		struct delayed_work		fallback_work;
		struct work_struct		exit_work;
		struct list_head		tctx_list;
		struct completion		ref_comp;
	};
};

struct io_uring_task {
	/* submission side */
	int			cached_refs;
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		inflight_tracked;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_ASYNC_READ_BIT,
	REQ_F_ASYNC_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_ASYNC_READ	= BIT(REQ_F_ASYNC_READ_BIT),
	/* supports async writes */
	REQ_F_ASYNC_WRITE	= BIT(REQ_F_ASYNC_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
};
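
/*
 * Because the first REQ_F_* bits reuse the IOSQE_* bit positions, the SQE's
 * user-visible flags can be copied straight into req->flags. A hedged sketch
 * of the idea (not a verbatim copy of the submission path, which also checks
 * the flags against per-ring restrictions):
 *
 *	unsigned int sqe_flags = READ_ONCE(sqe->flags);
 *
 *	if (sqe_flags & ~SQE_VALID_FLAGS)
 *		return -EINVAL;
 *	req->flags = sqe_flags;
 */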

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_update	poll_update;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/* used with ctx->iopoll_list with reads/writes */
	struct list_head		inflight_entry;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
	const struct cred		*creds;

	/* store used ubuf, so we can prevent reloading */
	struct io_mapped_ubuf		*imu;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};
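
/*
 * The table above is indexed by opcode on every submission. A hedged sketch
 * of a typical lookup (def and ret are illustrative names, not a verbatim
 * copy of the request init path):
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file && !req->file)
 *		return -EBADF;
 *	if (def->needs_async_setup)
 *		ret = io_req_prep_async(req);
 */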

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_tctx_node(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);

static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_ring_ctx *ctx);
static bool io_poll_remove_waitqs(struct io_kiocb *req);
static int io_req_prep_async(struct io_kiocb *req);

static void io_fallback_req_func(struct work_struct *unused);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_req_set_rsrc_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	/* already at zero, wait for ->release() */
	if (!got)
		wait_for_completion(compl);
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
		goto err;
	/* set invalid range, so io_import_fixed() fails meeting it */
	ctx->dummy_ubuf->ubuf = -1UL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->poll_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
	INIT_LIST_HEAD(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	return ctx;
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&current->io_uring->inflight_tracked);
	}
}

static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001247{
Jens Axboed3656342019-12-18 09:50:26 -07001248 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001249 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001250
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001251 if (!(req->flags & REQ_F_CREDS)) {
1252 req->flags |= REQ_F_CREDS;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01001253 req->creds = get_current_cred();
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01001254 }
Jens Axboe003e8dc2021-03-06 09:22:27 -07001255
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001256 req->work.list.next = NULL;
1257 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001258 if (req->flags & REQ_F_FORCE_ASYNC)
1259 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1260
Jens Axboed3656342019-12-18 09:50:26 -07001261 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001262 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001263 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001264 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001265 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001266 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001267 }
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001268
1269 switch (req->opcode) {
1270 case IORING_OP_SPLICE:
1271 case IORING_OP_TEE:
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001272 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1273 req->work.flags |= IO_WQ_WORK_UNBOUND;
1274 break;
1275 }
Jens Axboe561fb042019-10-24 07:25:42 -06001276}
1277
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001278static void io_prep_async_link(struct io_kiocb *req)
1279{
1280 struct io_kiocb *cur;
1281
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001282 io_for_each_link(cur, req)
1283 io_prep_async_work(cur);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001284}
1285
Pavel Begunkovebf93662021-03-01 18:20:47 +00001286static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001287{
Jackie Liua197f662019-11-08 08:09:12 -07001288 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001289 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001290 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001291
Jens Axboe3bfe6102021-02-16 14:15:30 -07001292 BUG_ON(!tctx);
1293 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001294
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001295 /* init ->work of the whole link before punting */
1296 io_prep_async_link(req);
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001297 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1298 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001299 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001300 if (link)
1301 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001302}
1303
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001304static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001305 __must_hold(&req->ctx->completion_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001306{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001307 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001308
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001309 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001310 atomic_set(&req->ctx->cq_timeouts,
1311 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001312 list_del_init(&req->timeout.list);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001313 io_cqring_fill_event(req->ctx, req->user_data, status, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001314 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001315 }
1316}
1317
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001318static void io_queue_deferred(struct io_ring_ctx *ctx)
Pavel Begunkov04518942020-05-26 20:34:05 +03001319{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001320 while (!list_empty(&ctx->defer_list)) {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001321 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1322 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001323
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001324 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001325 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001326 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001327 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001328 kfree(de);
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001329 }
Pavel Begunkov04518942020-05-26 20:34:05 +03001330}
1331
Pavel Begunkov360428f2020-05-30 14:54:17 +03001332static void io_flush_timeouts(struct io_ring_ctx *ctx)
1333{
Pavel Begunkov441b8a72021-06-14 23:37:31 +01001334 u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001335
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001336 while (!list_empty(&ctx->timeout_list)) {
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001337 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001338 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001339 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001340
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001341 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001342 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001343
1344 /*
1345 * Since seq can easily wrap around over time, subtract
1346 * the last seq at which timeouts were flushed before comparing.
1347 * Assuming not more than 2^31-1 events have happened since,
1348 * these subtractions won't have wrapped, so we can check if
1349 * target is in [last_seq, current_seq] by comparing the two.
1350 */
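		/*
		 * Worked example (hypothetical values, u32 arithmetic): with
		 * cq_last_tm_flush == 0xfffffffe, target_seq == 1 and
		 * seq == 2, events_needed == 3 and events_got == 4, so this
		 * timeout is flushed even though the raw counters have
		 * wrapped.
		 */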
1351 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1352 events_got = seq - ctx->cq_last_tm_flush;
1353 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001354 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001355
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001356 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001357 io_kill_timeout(req, 0);
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01001358 }
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001359 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001360}
1361
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001362static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
Jens Axboede0617e2019-04-06 21:51:27 -06001363{
Pavel Begunkov2335f6f2021-06-15 16:47:58 +01001364 if (ctx->off_timeout_used)
1365 io_flush_timeouts(ctx);
1366 if (ctx->drain_active)
1367 io_queue_deferred(ctx);
1368}
1369
1370static inline void io_commit_cqring(struct io_ring_ctx *ctx)
1371{
1372 if (unlikely(ctx->off_timeout_used || ctx->drain_active))
1373 __io_commit_cqring_flush(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001374 /* order cqe stores with ring update */
1375 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001376}
1377
Jens Axboe90554202020-09-03 12:12:41 -06001378static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1379{
1380 struct io_rings *r = ctx->rings;
1381
Pavel Begunkova566c552021-05-16 22:58:08 +01001382 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
Jens Axboe90554202020-09-03 12:12:41 -06001383}
1384
Pavel Begunkov888aae22021-01-19 13:32:39 +00001385static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1386{
1387 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1388}
1389
Pavel Begunkovd068b502021-05-16 22:58:11 +01001390static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001391{
Hristo Venev75b28af2019-08-26 17:23:46 +00001392 struct io_rings *rings = ctx->rings;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001393 unsigned tail, mask = ctx->cq_entries - 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001394
Stefan Bühler115e12e2019-04-24 23:54:18 +02001395 /*
1396 * writes to the cq entry need to come after reading head; the
1397 * control dependency is enough as we're using WRITE_ONCE to
1398 * fill the cq entry
1399 */
Pavel Begunkova566c552021-05-16 22:58:08 +01001400 if (__io_cqring_events(ctx) == ctx->cq_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001401 return NULL;
1402
Pavel Begunkov888aae22021-01-19 13:32:39 +00001403 tail = ctx->cached_cq_tail++;
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01001404 return &rings->cqes[tail & mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001405}
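/*
 * Note on the masking above: cq_entries is a power of two, so "tail & mask"
 * is equivalent to "tail % cq_entries". For example, with cq_entries == 8
 * (mask == 7) a cached_cq_tail of 9 selects slot 1, which lets the u32 tail
 * counter wrap around freely.
 */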
1406
Jens Axboef2842ab2020-01-08 11:04:00 -07001407static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1408{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001409 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001410 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001411 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1412 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001413 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001414}
1415
Jens Axboeb41e9852020-02-17 09:52:41 -07001416static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001417{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001418 /* see waitqueue_active() comment */
1419 smp_mb();
1420
Pavel Begunkov311997b2021-06-14 23:37:28 +01001421 if (waitqueue_active(&ctx->cq_wait))
1422 wake_up(&ctx->cq_wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001423 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1424 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001425 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001426 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001427 if (waitqueue_active(&ctx->poll_wait)) {
1428 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001429 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1430 }
Jens Axboe8c838782019-03-12 15:48:16 -06001431}
1432
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001433static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1434{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001435 /* see waitqueue_active() comment */
1436 smp_mb();
1437
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001438 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov311997b2021-06-14 23:37:28 +01001439 if (waitqueue_active(&ctx->cq_wait))
1440 wake_up(&ctx->cq_wait);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001441 }
1442 if (io_should_trigger_evfd(ctx))
1443 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkov311997b2021-06-14 23:37:28 +01001444 if (waitqueue_active(&ctx->poll_wait)) {
1445 wake_up_interruptible(&ctx->poll_wait);
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001446 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1447 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001448}
1449
Jens Axboec4a2ed72019-11-21 21:01:26 -07001450/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001451static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001452{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001453 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001454 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001455
Pavel Begunkova566c552021-05-16 22:58:08 +01001456 if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
Pavel Begunkove23de152020-12-17 00:24:37 +00001457 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001458
Jens Axboeb18032b2021-01-24 16:58:56 -07001459 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001460 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001461 while (!list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkovd068b502021-05-16 22:58:11 +01001462 struct io_uring_cqe *cqe = io_get_cqe(ctx);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001463 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001464
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001465 if (!cqe && !force)
1466 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001467 ocqe = list_first_entry(&ctx->cq_overflow_list,
1468 struct io_overflow_cqe, list);
1469 if (cqe)
1470 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1471 else
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001472 io_account_cq_overflow(ctx);
1473
Jens Axboeb18032b2021-01-24 16:58:56 -07001474 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001475 list_del(&ocqe->list);
1476 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001477 }
1478
Pavel Begunkov09e88402020-12-17 00:24:38 +00001479 all_flushed = list_empty(&ctx->cq_overflow_list);
1480 if (all_flushed) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001481 clear_bit(0, &ctx->check_cq_overflow);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001482 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1483 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001484
Jens Axboeb18032b2021-01-24 16:58:56 -07001485 if (posted)
1486 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001487 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001488 if (posted)
1489 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001490 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001491}
1492
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001493static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001494{
Jens Axboeca0a2652021-03-04 17:15:48 -07001495 bool ret = true;
1496
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001497 if (test_bit(0, &ctx->check_cq_overflow)) {
Pavel Begunkov6c503152021-01-04 20:36:36 +00001498 /* iopoll syncs against uring_lock, not completion_lock */
1499 if (ctx->flags & IORING_SETUP_IOPOLL)
1500 mutex_lock(&ctx->uring_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001501 ret = __io_cqring_overflow_flush(ctx, force);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001502 if (ctx->flags & IORING_SETUP_IOPOLL)
1503 mutex_unlock(&ctx->uring_lock);
1504 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001505
1506 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001507}
1508
Jens Axboeabc54d62021-02-24 13:32:30 -07001509/*
1510 * Shamelessly stolen from the mm implementation of page reference checking,
1511 * see commit f958d7b528b1 for details.
1512 */
1513#define req_ref_zero_or_close_to_overflow(req) \
1514 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
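/*
 * For illustration: refs == 0 evaluates as 127u <= 127u (true), an
 * underflowed refs == -1 (i.e. UINT_MAX) as 126u <= 127u (true), while a
 * sane count such as refs == 1 gives 128u <= 127u (false).
 */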
1515
Jens Axboede9b4cc2021-02-24 13:28:27 -07001516static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001517{
Jens Axboeabc54d62021-02-24 13:32:30 -07001518 return atomic_inc_not_zero(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001519}
1520
1521static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
1522{
Jens Axboeabc54d62021-02-24 13:32:30 -07001523 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1524 return atomic_sub_and_test(refs, &req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001525}
1526
1527static inline bool req_ref_put_and_test(struct io_kiocb *req)
1528{
Jens Axboeabc54d62021-02-24 13:32:30 -07001529 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1530 return atomic_dec_and_test(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001531}
1532
1533static inline void req_ref_put(struct io_kiocb *req)
1534{
Jens Axboeabc54d62021-02-24 13:32:30 -07001535 WARN_ON_ONCE(req_ref_put_and_test(req));
Jens Axboede9b4cc2021-02-24 13:28:27 -07001536}
1537
1538static inline void req_ref_get(struct io_kiocb *req)
1539{
Jens Axboeabc54d62021-02-24 13:32:30 -07001540 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1541 atomic_inc(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001542}
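/*
 * Requests normally carry two references, one for submission and one for
 * completion; see io_submit_flush_completions(), which drops both at once
 * with req_ref_sub_and_test(req, 2).
 */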
1543
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001544static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
1545 long res, unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001546{
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001547 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001548
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001549 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1550 if (!ocqe) {
1551 /*
1552 * If we're in ring overflow flush mode, or in task cancel mode,
1553 * or cannot allocate an overflow entry, then we need to drop it
1554 * on the floor.
1555 */
Pavel Begunkov8f6ed492021-05-16 22:58:10 +01001556 io_account_cq_overflow(ctx);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001557 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001558 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001559 if (list_empty(&ctx->cq_overflow_list)) {
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01001560 set_bit(0, &ctx->check_cq_overflow);
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001561 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
1562 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001563 ocqe->cqe.user_data = user_data;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001564 ocqe->cqe.res = res;
1565 ocqe->cqe.flags = cflags;
1566 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1567 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001568}
1569
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001570static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1571 long res, unsigned int cflags)
Pavel Begunkov8d133262021-04-11 01:46:33 +01001572{
Jens Axboe2b188cc2019-01-07 10:46:33 -07001573 struct io_uring_cqe *cqe;
1574
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001575 trace_io_uring_complete(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001576
1577 /*
1578 * If we can't get a cq entry, userspace overflowed the
1579 * submission (by quite a lot). Increment the overflow count in
1580 * the ring.
1581 */
Pavel Begunkovd068b502021-05-16 22:58:11 +01001582 cqe = io_get_cqe(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001583 if (likely(cqe)) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001584 WRITE_ONCE(cqe->user_data, user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001585 WRITE_ONCE(cqe->res, res);
1586 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001587 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001588 }
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001589 return io_cqring_event_overflow(ctx, user_data, res, cflags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001590}
1591
Pavel Begunkov8d133262021-04-11 01:46:33 +01001592/* not as hot to bloat with inlining */
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001593static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
1594 long res, unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001595{
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001596 return __io_cqring_fill_event(ctx, user_data, res, cflags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001597}
1598
Pavel Begunkov7a612352021-03-09 00:37:59 +00001599static void io_req_complete_post(struct io_kiocb *req, long res,
1600 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001601{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001602 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001603 unsigned long flags;
1604
1605 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001606 __io_cqring_fill_event(ctx, req->user_data, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001607 /*
1608 * If we're the last reference to this request, add to our locked
1609 * free_list cache.
1610 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001611 if (req_ref_put_and_test(req)) {
Pavel Begunkov7a612352021-03-09 00:37:59 +00001612 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001613 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL))
Pavel Begunkov7a612352021-03-09 00:37:59 +00001614 io_disarm_next(req);
1615 if (req->link) {
1616 io_req_task_queue(req->link);
1617 req->link = NULL;
1618 }
1619 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001620 io_dismantle_req(req);
1621 io_put_task(req->task, 1);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001622 list_add(&req->compl.list, &ctx->locked_free_list);
1623 ctx->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001624 } else {
1625 if (!percpu_ref_tryget(&ctx->refs))
1626 req = NULL;
1627 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001628 io_commit_cqring(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001629 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001630
Pavel Begunkov180f8292021-03-14 20:57:09 +00001631 if (req) {
1632 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001633 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001634 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001635}
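/*
 * Requests parked on ctx->locked_free_list above are recycled later by
 * io_flush_cached_reqs(), which splices them back into the submission-side
 * free cache once more than a batch's worth has accumulated.
 */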
1636
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001637static inline bool io_req_needs_clean(struct io_kiocb *req)
1638{
Pavel Begunkovc8543572021-06-17 18:14:04 +01001639 return req->flags & IO_REQ_CLEAN_FLAGS;
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001640}
1641
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001642static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001643 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001644{
Jens Axboe4e3d9ff2021-04-15 17:44:34 -06001645 if (io_req_needs_clean(req))
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001646 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001647 req->result = res;
1648 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001649 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001650}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001651
Pavel Begunkov889fca72021-02-10 00:03:09 +00001652static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1653 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001654{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001655 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1656 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001657 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001658 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001659}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001660
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001661static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001662{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001663 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001664}
1665
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001666static void io_req_complete_failed(struct io_kiocb *req, long res)
1667{
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001668 req_set_fail(req);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001669 io_put_req(req);
1670 io_req_complete_post(req, res, 0);
1671}
1672
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001673static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1674 struct io_comp_state *cs)
1675{
1676 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001677 list_splice_init(&ctx->locked_free_list, &cs->free_list);
1678 ctx->locked_free_nr = 0;
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001679 spin_unlock_irq(&ctx->completion_lock);
1680}
1681
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001682/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001683static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001684{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001685 struct io_submit_state *state = &ctx->submit_state;
1686 struct io_comp_state *cs = &state->comp;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001687 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001688
Jens Axboec7dae4b2021-02-09 19:53:37 -07001689 /*
1690 * If we have more than a batch's worth of requests in our IRQ side
1691 * locked cache, grab the lock and move them over to our submission
1692 * side cache.
1693 */
Pavel Begunkovd0acdee2021-05-16 22:58:12 +01001694 if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001695 io_flush_cached_locked_reqs(ctx, cs);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001696
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001697 nr = state->free_reqs;
Jens Axboec7dae4b2021-02-09 19:53:37 -07001698 while (!list_empty(&cs->free_list)) {
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001699 struct io_kiocb *req = list_first_entry(&cs->free_list,
1700 struct io_kiocb, compl.list);
1701
Jens Axboe2b188cc2019-01-07 10:46:33 -07001702 list_del(&req->compl.list);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001703 state->reqs[nr++] = req;
1704 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001705 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001706 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001707
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001708 state->free_reqs = nr;
1709 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001710}
1711
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001712static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001713{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001714 struct io_submit_state *state = &ctx->submit_state;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001715
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01001716 BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001717
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001718 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001719 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001720 int ret, i;
Jens Axboe2579f912019-01-09 09:10:43 -07001721
Jens Axboec7dae4b2021-02-09 19:53:37 -07001722 if (io_flush_cached_reqs(ctx))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001723 goto got_req;
1724
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001725 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1726 state->reqs);
Jens Axboefd6fab22019-03-14 16:30:06 -06001727
1728 /*
1729 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1730 * retry single alloc to be on the safe side.
1731 */
1732 if (unlikely(ret <= 0)) {
1733 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1734 if (!state->reqs[0])
Pavel Begunkov3893f392021-02-10 00:03:15 +00001735 return NULL;
Jens Axboefd6fab22019-03-14 16:30:06 -06001736 ret = 1;
1737 }
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001738
1739 /*
1740 * Don't initialise the fields below on every allocation, but
1741 * do that in advance and keep valid on free.
1742 */
1743 for (i = 0; i < ret; i++) {
1744 struct io_kiocb *req = state->reqs[i];
1745
1746 req->ctx = ctx;
1747 req->link = NULL;
1748 req->async_data = NULL;
1749 /* not necessary, but safer to zero */
1750 req->result = 0;
1751 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001752 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001753 }
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001754got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001755 state->free_reqs--;
1756 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001757}
1758
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001759static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001760{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001761 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001762 fput(file);
1763}
1764
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001765static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001766{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001767 unsigned int flags = req->flags;
Pavel Begunkov929a3af2020-02-19 00:19:09 +03001768
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01001769 if (io_req_needs_clean(req))
1770 io_clean_op(req);
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001771 if (!(flags & REQ_F_FIXED_FILE))
1772 io_put_file(req->file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001773 if (req->fixed_rsrc_refs)
1774 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001775 if (req->async_data) {
Pavel Begunkov094bae42021-03-19 17:22:42 +00001776 kfree(req->async_data);
Pavel Begunkov99ebe4e2021-06-26 21:40:49 +01001777 req->async_data = NULL;
1778 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001779}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001780
Pavel Begunkovb23fcf42021-03-01 18:20:48 +00001781/* must be called somewhat shortly after putting a request */
Pavel Begunkov7c660732021-01-25 11:42:21 +00001782static inline void io_put_task(struct task_struct *task, int nr)
1783{
1784 struct io_uring_task *tctx = task->io_uring;
1785
1786 percpu_counter_sub(&tctx->inflight, nr);
1787 if (unlikely(atomic_read(&tctx->in_idle)))
1788 wake_up(&tctx->wait);
1789 put_task_struct_many(task, nr);
1790}
1791
Pavel Begunkov216578e2020-10-13 09:44:00 +01001792static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001793{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001794 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001795
Pavel Begunkov216578e2020-10-13 09:44:00 +01001796 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001797 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001798
Pavel Begunkov3893f392021-02-10 00:03:15 +00001799 kmem_cache_free(req_cachep, req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001800 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001801}
1802
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001803static inline void io_remove_next_linked(struct io_kiocb *req)
1804{
1805 struct io_kiocb *nxt = req->link;
1806
1807 req->link = nxt->link;
1808 nxt->link = NULL;
1809}
1810
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001811static bool io_kill_linked_timeout(struct io_kiocb *req)
1812 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001813{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001814 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001815
Pavel Begunkov900fad42020-10-19 16:39:16 +01001816 /*
1817 * Can happen if a linked timeout fired and link had been like
1818 * req -> link t-out -> link t-out [-> ...]
1819 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001820 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1821 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001822
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001823 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001824 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001825 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001826 io_cqring_fill_event(link->ctx, link->user_data,
1827 -ECANCELED, 0);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001828 io_put_req_deferred(link, 1);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001829 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001830 }
1831 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001832 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001833}
1834
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001835static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001836 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001837{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001838 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001839
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001840 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001841 while (link) {
1842 nxt = link->link;
1843 link->link = NULL;
1844
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001845 trace_io_uring_fail_link(req, link);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01001846 io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
Jens Axboe1575f212021-02-27 15:20:49 -07001847 io_put_req_deferred(link, 2);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001848 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001849 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001850}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001851
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001852static bool io_disarm_next(struct io_kiocb *req)
1853 __must_hold(&req->ctx->completion_lock)
1854{
1855 bool posted = false;
1856
1857 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1858 posted = io_kill_linked_timeout(req);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001859 if (unlikely((req->flags & REQ_F_FAIL) &&
Pavel Begunkove4335ed2021-04-11 01:46:39 +01001860 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001861 posted |= (req->link != NULL);
1862 io_fail_links(req);
1863 }
1864 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001865}
1866
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001867static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001868{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001869 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001870
Jens Axboe9e645e112019-05-10 16:07:28 -06001871 /*
1872 * If LINK is set, we have dependent requests in this chain. If we
1873 * didn't fail this request, queue the first one up, moving any other
1874 * dependencies to the next request. In case of failure, fail the rest
1875 * of the chain.
1876 */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01001877 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL)) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001878 struct io_ring_ctx *ctx = req->ctx;
1879 unsigned long flags;
1880 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001881
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001882 spin_lock_irqsave(&ctx->completion_lock, flags);
1883 posted = io_disarm_next(req);
1884 if (posted)
1885 io_commit_cqring(req->ctx);
1886 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1887 if (posted)
1888 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001889 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001890 nxt = req->link;
1891 req->link = NULL;
1892 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001893}
Jens Axboe2665abf2019-11-05 12:40:47 -07001894
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001895static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001896{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001897 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001898 return NULL;
1899 return __io_req_find_next(req);
1900}
1901
Pavel Begunkov2c323952021-02-28 22:04:53 +00001902static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1903{
1904 if (!ctx)
1905 return;
1906 if (ctx->submit_state.comp.nr) {
1907 mutex_lock(&ctx->uring_lock);
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01001908 io_submit_flush_completions(ctx);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001909 mutex_unlock(&ctx->uring_lock);
1910 }
1911 percpu_ref_put(&ctx->refs);
1912}
1913
Jens Axboe7cbf1722021-02-10 00:03:20 +00001914static void tctx_task_work(struct callback_head *cb)
1915{
Pavel Begunkovebd0df22021-06-17 18:14:07 +01001916 struct io_ring_ctx *ctx = NULL;
Pavel Begunkov3f184072021-06-17 18:14:06 +01001917 struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1918 task_work);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001919
Pavel Begunkov16f72072021-06-17 18:14:09 +01001920 while (1) {
Pavel Begunkov3f184072021-06-17 18:14:06 +01001921 struct io_wq_work_node *node;
1922
1923 spin_lock_irq(&tctx->task_lock);
Pavel Begunkovc6538be2021-06-17 18:14:08 +01001924 node = tctx->task_list.first;
Pavel Begunkov3f184072021-06-17 18:14:06 +01001925 INIT_WQ_LIST(&tctx->task_list);
1926 spin_unlock_irq(&tctx->task_lock);
1927
Pavel Begunkov3f184072021-06-17 18:14:06 +01001928 while (node) {
1929 struct io_wq_work_node *next = node->next;
1930 struct io_kiocb *req = container_of(node, struct io_kiocb,
1931 io_task_work.node);
1932
1933 if (req->ctx != ctx) {
1934 ctx_flush_and_put(ctx);
1935 ctx = req->ctx;
1936 percpu_ref_get(&ctx->refs);
1937 }
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01001938 req->io_task_work.func(req);
Pavel Begunkov3f184072021-06-17 18:14:06 +01001939 node = next;
1940 }
Pavel Begunkov7a778f92021-06-17 18:14:10 +01001941 if (wq_list_empty(&tctx->task_list)) {
1942 clear_bit(0, &tctx->task_state);
1943 if (wq_list_empty(&tctx->task_list))
1944 break;
1945 /* another tctx_task_work() is enqueued, yield */
1946 if (test_and_set_bit(0, &tctx->task_state))
1947 break;
1948 }
Jens Axboe7cbf1722021-02-10 00:03:20 +00001949 cond_resched();
Pavel Begunkov3f184072021-06-17 18:14:06 +01001950 }
Pavel Begunkovebd0df22021-06-17 18:14:07 +01001951
1952 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001953}
1954
Pavel Begunkove09ee512021-07-01 13:26:05 +01001955static void io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00001956{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001957 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001958 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001959 enum task_work_notify_mode notify;
Pavel Begunkove09ee512021-07-01 13:26:05 +01001960 struct io_wq_work_node *node;
Jens Axboe0b81e802021-02-16 10:33:53 -07001961 unsigned long flags;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001962
1963 WARN_ON_ONCE(!tctx);
1964
Jens Axboe0b81e802021-02-16 10:33:53 -07001965 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001966 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001967 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001968
1969 /* task_work already pending, we're done */
1970 if (test_bit(0, &tctx->task_state) ||
1971 test_and_set_bit(0, &tctx->task_state))
Pavel Begunkove09ee512021-07-01 13:26:05 +01001972 return;
1973 if (unlikely(tsk->flags & PF_EXITING))
1974 goto fail;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001975
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001976 /*
1977 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1978 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1979 * processing task_work. There's no reliable way to tell if TWA_RESUME
1980 * will do the job.
1981 */
1982 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001983 if (!task_work_add(tsk, &tctx->task_work, notify)) {
1984 wake_up_process(tsk);
Pavel Begunkove09ee512021-07-01 13:26:05 +01001985 return;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001986 }
Pavel Begunkove09ee512021-07-01 13:26:05 +01001987fail:
Jens Axboe7cbf1722021-02-10 00:03:20 +00001988 clear_bit(0, &tctx->task_state);
Pavel Begunkove09ee512021-07-01 13:26:05 +01001989 spin_lock_irqsave(&tctx->task_lock, flags);
1990 node = tctx->task_list.first;
1991 INIT_WQ_LIST(&tctx->task_list);
1992 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001993
Pavel Begunkove09ee512021-07-01 13:26:05 +01001994 while (node) {
1995 req = container_of(node, struct io_kiocb, io_task_work.node);
1996 node = node->next;
1997 if (llist_add(&req->io_task_work.fallback_node,
1998 &req->ctx->fallback_llist))
1999 schedule_delayed_work(&req->ctx->fallback_work, 1);
2000 }
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002001}
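/*
 * When task_work_add() fails above (the task is exiting), the queued
 * requests are drained onto ctx->fallback_llist and ctx->fallback_work
 * (set up in io_ring_ctx_alloc() to run io_fallback_req_func) is scheduled
 * to process them from workqueue context.
 */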
2002
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002003static void io_req_task_cancel(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06002004{
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002005 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002006
Pavel Begunkove83acd72021-02-28 22:35:09 +00002007 /* ctx is guaranteed to stay alive while we hold uring_lock */
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002008 mutex_lock(&ctx->uring_lock);
Pavel Begunkov25935532021-03-19 17:22:40 +00002009 io_req_complete_failed(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002010 mutex_unlock(&ctx->uring_lock);
Jens Axboec40f6372020-06-25 15:39:59 -06002011}
2012
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002013static void io_req_task_submit(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06002014{
2015 struct io_ring_ctx *ctx = req->ctx;
2016
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00002017	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002018 mutex_lock(&ctx->uring_lock);
Pavel Begunkov9c688262021-07-10 02:45:59 +01002019 if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002020 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002021 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002022 io_req_complete_failed(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002023 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002024}
2025
Pavel Begunkova3df76982021-02-18 22:32:52 +00002026static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2027{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002028 req->result = ret;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002029 req->io_task_work.func = io_req_task_cancel;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002030 io_req_task_work_add(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00002031}
2032
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002033static void io_req_task_queue(struct io_kiocb *req)
2034{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002035 req->io_task_work.func = io_req_task_submit;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002036 io_req_task_work_add(req);
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002037}
2038
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002039static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002040{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002041 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002042
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002043 if (nxt)
2044 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002045}
2046
Jens Axboe9e645e112019-05-10 16:07:28 -06002047static void io_free_req(struct io_kiocb *req)
2048{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002049 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002050 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002051}
2052
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002053struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002054 struct task_struct *task;
2055 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002056 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002057};
2058
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002059static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002060{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002061 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002062 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002063 rb->task = NULL;
2064}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002065
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002066static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2067 struct req_batch *rb)
2068{
Pavel Begunkov6e833d52021-02-11 18:28:20 +00002069 if (rb->task)
Pavel Begunkov7c660732021-01-25 11:42:21 +00002070 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002071 if (rb->ctx_refs)
2072 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002073}
2074
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002075static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2076 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002077{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002078 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002079 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002080
Jens Axboee3bc8e92020-09-24 08:45:57 -06002081 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002082 if (rb->task)
2083 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002084 rb->task = req->task;
2085 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002086 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002087 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002088 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002089
Pavel Begunkovbd759042021-02-12 03:23:50 +00002090 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002091 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002092 else
2093 list_add(&req->compl.list, &state->comp.free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002094}
2095
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002096static void io_submit_flush_completions(struct io_ring_ctx *ctx)
Pavel Begunkov905c1722021-02-10 00:03:14 +00002097{
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01002098 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002099 int i, nr = cs->nr;
Pavel Begunkov905c1722021-02-10 00:03:14 +00002100 struct req_batch rb;
2101
Pavel Begunkov905c1722021-02-10 00:03:14 +00002102 spin_lock_irq(&ctx->completion_lock);
2103 for (i = 0; i < nr; i++) {
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002104 struct io_kiocb *req = cs->reqs[i];
2105
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002106 __io_cqring_fill_event(ctx, req->user_data, req->result,
2107 req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002108 }
2109 io_commit_cqring(ctx);
2110 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002111 io_cqring_ev_posted(ctx);
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002112
2113 io_init_req_batch(&rb);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002114 for (i = 0; i < nr; i++) {
Pavel Begunkov5182ed22021-06-26 21:40:48 +01002115 struct io_kiocb *req = cs->reqs[i];
Pavel Begunkov905c1722021-02-10 00:03:14 +00002116
2117 /* submission and completion refs */
Jens Axboede9b4cc2021-02-24 13:28:27 -07002118 if (req_ref_sub_and_test(req, 2))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002119 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002120 }
2121
2122 io_req_free_batch_finish(ctx, &rb);
2123 cs->nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002124}
2125
Jens Axboeba816ad2019-09-28 11:36:45 -06002126/*
2127 * Drop reference to request, return next in chain (if there is one) if this
2128 * was the last reference to this request.
2129 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002130static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002131{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002132 struct io_kiocb *nxt = NULL;
2133
Jens Axboede9b4cc2021-02-24 13:28:27 -07002134 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002135 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002136 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002137 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002138 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002139}
2140
Pavel Begunkov0d850352021-03-19 17:22:37 +00002141static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002142{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002143 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002144 io_free_req(req);
2145}
2146
Pavel Begunkov216578e2020-10-13 09:44:00 +01002147static void io_free_req_deferred(struct io_kiocb *req)
2148{
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002149 req->io_task_work.func = io_free_req;
Pavel Begunkove09ee512021-07-01 13:26:05 +01002150 io_req_task_work_add(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002151}
2152
2153static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2154{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002155 if (req_ref_sub_and_test(req, refs))
Pavel Begunkov216578e2020-10-13 09:44:00 +01002156 io_free_req_deferred(req);
2157}
2158
Pavel Begunkov6c503152021-01-04 20:36:36 +00002159static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002160{
2161 /* See comment at the top of this file */
2162 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002163 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002164}
2165
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002166static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2167{
2168 struct io_rings *rings = ctx->rings;
2169
2170 /* make sure SQ entry isn't read before tail */
2171 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2172}
2173
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002174static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002175{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002176 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002177
Jens Axboebcda7ba2020-02-23 16:42:51 -07002178 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2179 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002180 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002181 kfree(kbuf);
2182 return cflags;
2183}
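/*
 * For illustration: a selected buffer with bid == 3 is reported as
 * cflags == (3 << IORING_CQE_BUFFER_SHIFT) | IORING_CQE_F_BUFFER, and the
 * application recovers the buffer id as cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */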
2184
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002185static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2186{
2187 struct io_buffer *kbuf;
2188
2189 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2190 return io_put_kbuf(req, kbuf);
2191}
2192
Jens Axboe4c6e2772020-07-01 11:29:10 -06002193static inline bool io_run_task_work(void)
2194{
2195 if (current->task_works) {
2196 __set_current_state(TASK_RUNNING);
2197 task_work_run();
2198 return true;
2199 }
2200
2201 return false;
2202}
2203
Jens Axboedef596e2019-01-09 08:59:42 -07002204/*
2205 * Find and free completed poll iocbs
2206 */
2207static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2208 struct list_head *done)
2209{
Jens Axboe8237e042019-12-28 10:48:22 -07002210 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002211 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002212
2213 /* order with ->result store in io_complete_rw_iopoll() */
2214 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002215
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002216 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002217 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002218 int cflags = 0;
2219
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002220 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002221 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002222
Pavel Begunkov8c130822021-03-22 01:58:32 +00002223 if (READ_ONCE(req->result) == -EAGAIN &&
2224 !(req->flags & REQ_F_DONT_REISSUE)) {
Pavel Begunkovf1613402021-02-11 18:28:21 +00002225 req->iopoll_completed = 0;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002226 req_ref_get(req);
2227 io_queue_async_work(req);
2228 continue;
Pavel Begunkovf1613402021-02-11 18:28:21 +00002229 }
2230
Jens Axboebcda7ba2020-02-23 16:42:51 -07002231 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002232 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002233
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01002234 __io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002235 (*nr_events)++;
2236
Jens Axboede9b4cc2021-02-24 13:28:27 -07002237 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002238 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002239 }
Jens Axboedef596e2019-01-09 08:59:42 -07002240
Jens Axboe09bb8392019-03-13 12:39:28 -06002241 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002242 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002243 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002244}
2245
Jens Axboedef596e2019-01-09 08:59:42 -07002246static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2247 long min)
2248{
2249 struct io_kiocb *req, *tmp;
2250 LIST_HEAD(done);
2251 bool spin;
2252 int ret;
2253
2254 /*
2255 * Only spin for completions if we don't have multiple devices hanging
2256 * off our complete list, and we're under the requested amount.
2257 */
Hao Xu915b3dd2021-06-28 05:37:30 +08002258 spin = !ctx->poll_multi_queue && *nr_events < min;
Jens Axboedef596e2019-01-09 08:59:42 -07002259
2260 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002261 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002262 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002263
2264 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002265 * Move completed and retryable entries to our local lists.
2266 * If we find a request that requires polling, break out
2267 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002268 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002269 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002270 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002271 continue;
2272 }
2273 if (!list_empty(&done))
2274 break;
2275
2276 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2277 if (ret < 0)
2278 break;
2279
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002280 /* iopoll may have completed current req */
2281 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002282 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002283
Jens Axboedef596e2019-01-09 08:59:42 -07002284 if (ret && spin)
2285 spin = false;
2286 ret = 0;
2287 }
2288
2289 if (!list_empty(&done))
2290 io_iopoll_complete(ctx, nr_events, &done);
2291
2292 return ret;
2293}
2294
2295/*
Jens Axboedef596e2019-01-09 08:59:42 -07002296 * We can't just wait for polled events to come to us, we have to actively
2297 * find and complete them.
2298 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002299static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002300{
2301 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2302 return;
2303
2304 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002305 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002306 unsigned int nr_events = 0;
2307
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002308 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002309
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002310		/* let it sleep and repeat later if we can't complete a request */

2311 if (nr_events == 0)
2312 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002313 /*
2314		 * Ensure we allow local-to-the-cpu processing to take place;
2315		 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002316		 * Also let task_work, etc. progress by releasing the mutex.
Jens Axboe08f54392019-08-21 22:19:11 -06002317 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002318 if (need_resched()) {
2319 mutex_unlock(&ctx->uring_lock);
2320 cond_resched();
2321 mutex_lock(&ctx->uring_lock);
2322 }
Jens Axboedef596e2019-01-09 08:59:42 -07002323 }
2324 mutex_unlock(&ctx->uring_lock);
2325}
2326
Pavel Begunkov7668b922020-07-07 16:36:21 +03002327static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002328{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002329 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002330 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002331
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002332 /*
2333 * We disallow the app entering submit/complete with polling, but we
2334 * still need to lock the ring to prevent racing with polled issue
2335 * that got punted to a workqueue.
2336 */
2337 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002338 /*
2339 * Don't enter poll loop if we already have events pending.
2340 * If we do, we can potentially be spinning for commands that
2341	 * already triggered a CQE (e.g. in error).
2342 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01002343 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002344 __io_cqring_overflow_flush(ctx, false);
2345 if (io_cqring_events(ctx))
2346 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002347 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002348 /*
2349 * If a submit got punted to a workqueue, we can have the
2350 * application entering polling for a command before it gets
2351 * issued. That app will hold the uring_lock for the duration
2352 * of the poll right here, so we need to take a breather every
2353 * now and then to ensure that the issue has a chance to add
2354 * the poll to the issued list. Otherwise we can spin here
2355 * forever, while the workqueue is stuck trying to acquire the
2356 * very same mutex.
2357 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002358 if (list_empty(&ctx->iopoll_list)) {
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002359 u32 tail = ctx->cached_cq_tail;
2360
Jens Axboe500f9fb2019-08-19 12:15:59 -06002361 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002362 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002363 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002364
Pavel Begunkov8f487ef2021-07-08 13:37:06 +01002365 /* some requests don't go through iopoll_list */
2366 if (tail != ctx->cached_cq_tail ||
2367 list_empty(&ctx->iopoll_list))
Pavel Begunkove9979b32021-04-13 02:58:45 +01002368 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002369 }
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002370 ret = io_do_iopoll(ctx, &nr_events, min);
2371 } while (!ret && nr_events < min && !need_resched());
2372out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002373 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002374 return ret;
2375}
2376
Jens Axboe491381ce2019-10-17 09:20:46 -06002377static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002378{
Jens Axboe491381ce2019-10-17 09:20:46 -06002379 /*
2380	 * Tell lockdep we inherited freeze protection from the submission
2381 * thread.
2382 */
2383 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002384 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002385
Pavel Begunkov1c986792021-03-22 01:58:31 +00002386 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2387 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002388 }
2389}
2390
Jens Axboeb63534c2020-06-04 11:28:00 -06002391#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002392static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002393{
Pavel Begunkovab454432021-03-22 01:58:33 +00002394 struct io_async_rw *rw = req->async_data;
Jens Axboeb63534c2020-06-04 11:28:00 -06002395
Pavel Begunkovab454432021-03-22 01:58:33 +00002396 if (!rw)
2397 return !io_req_prep_async(req);
2398 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2399 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2400 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002401}
Jens Axboeb63534c2020-06-04 11:28:00 -06002402
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002403static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002404{
Jens Axboe355afae2020-09-02 09:30:31 -06002405 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002406 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002407
Jens Axboe355afae2020-09-02 09:30:31 -06002408 if (!S_ISBLK(mode) && !S_ISREG(mode))
2409 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002410 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2411 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002412 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002413 /*
2414 * If ref is dying, we might be running poll reap from the exit work.
2415 * Don't attempt to reissue from that path, just let it fail with
2416 * -EAGAIN.
2417 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002418 if (percpu_ref_is_dying(&ctx->refs))
2419 return false;
2420 return true;
2421}
Jens Axboee82ad482021-04-02 19:45:34 -06002422#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002423static bool io_resubmit_prep(struct io_kiocb *req)
2424{
2425 return false;
2426}
Jens Axboee82ad482021-04-02 19:45:34 -06002427static bool io_rw_should_reissue(struct io_kiocb *req)
2428{
2429 return false;
2430}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002431#endif
2432
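/*
 * Requests whose task_work could not be queued to the target task end up
 * on ctx->fallback_llist; run their io_task_work.func callbacks from
 * workqueue context here instead.
 */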
Pavel Begunkov9011bf92021-06-30 21:54:03 +01002433static void io_fallback_req_func(struct work_struct *work)
2434{
2435 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
2436 fallback_work.work);
2437 struct llist_node *node = llist_del_all(&ctx->fallback_llist);
2438 struct io_kiocb *req, *tmp;
2439
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01002440 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
2441 req->io_task_work.func(req);
Pavel Begunkov9011bf92021-06-30 21:54:03 +01002442}
2443
Jens Axboea1d7c392020-06-22 11:09:46 -06002444static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002445 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002446{
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002447 int cflags = 0;
2448
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002449 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2450 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002451 if (res != req->result) {
2452 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2453 io_rw_should_reissue(req)) {
2454 req->flags |= REQ_F_REISSUE;
2455 return;
2456 }
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002457 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002458 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002459 if (req->flags & REQ_F_BUFFER_SELECTED)
2460 cflags = io_put_rw_kbuf(req);
2461 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002462}
2463
2464static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2465{
Jens Axboe9adbd452019-12-20 08:45:55 -07002466 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002467
Pavel Begunkov889fca72021-02-10 00:03:09 +00002468 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002469}
2470
Jens Axboedef596e2019-01-09 08:59:42 -07002471static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2472{
Jens Axboe9adbd452019-12-20 08:45:55 -07002473 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002474
Jens Axboe491381ce2019-10-17 09:20:46 -06002475 if (kiocb->ki_flags & IOCB_WRITE)
2476 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002477 if (unlikely(res != req->result)) {
Jens Axboea1ff1e32021-04-12 06:40:02 -06002478 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2479 io_resubmit_prep(req))) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002480 req_set_fail(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002481 req->flags |= REQ_F_DONT_REISSUE;
2482 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002483 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002484
2485 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002486 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002487 smp_wmb();
2488 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002489}
2490
2491/*
2492 * After the iocb has been issued, it's safe to be found on the poll list.
2493 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002494 * find it from an io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002495 * accessing the kiocb cookie.
2496 */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002497static void io_iopoll_req_issued(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07002498{
2499 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002500 const bool in_async = io_wq_current_is_worker();
2501
2502 /* workqueue context doesn't hold uring_lock, grab it now */
2503 if (unlikely(in_async))
2504 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002505
2506 /*
2507 * Track whether we have multiple files in our lists. This will impact
2508	 * how we do polling eventually: we won't spin if we're potentially
2509	 * on different devices.
2510 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002511 if (list_empty(&ctx->iopoll_list)) {
Hao Xu915b3dd2021-06-28 05:37:30 +08002512 ctx->poll_multi_queue = false;
2513 } else if (!ctx->poll_multi_queue) {
Jens Axboedef596e2019-01-09 08:59:42 -07002514 struct io_kiocb *list_req;
Hao Xu915b3dd2021-06-28 05:37:30 +08002515 unsigned int queue_num0, queue_num1;
Jens Axboedef596e2019-01-09 08:59:42 -07002516
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002517 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002518 inflight_entry);
Hao Xu915b3dd2021-06-28 05:37:30 +08002519
2520 if (list_req->file != req->file) {
2521 ctx->poll_multi_queue = true;
2522 } else {
2523 queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie);
2524 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie);
2525 if (queue_num0 != queue_num1)
2526 ctx->poll_multi_queue = true;
2527 }
Jens Axboedef596e2019-01-09 08:59:42 -07002528 }
2529
2530 /*
2531 * For fast devices, IO may have already completed. If it has, add
2532 * it to the front so we find it first.
2533 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002534 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002535 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002536 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002537 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002538
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01002539 if (unlikely(in_async)) {
2540 /*
2541		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
2542 * in sq thread task context or in io worker task context. If
2543 * current task context is sq thread, we don't need to check
2544 * whether should wake up sq thread.
2545 */
2546 if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2547 wq_has_sleeper(&ctx->sq_data->wait))
2548 wake_up(&ctx->sq_data->wait);
2549
2550 mutex_unlock(&ctx->uring_lock);
2551 }
Jens Axboedef596e2019-01-09 08:59:42 -07002552}
2553
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002554static inline void io_state_file_put(struct io_submit_state *state)
2555{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002556 if (state->file_refs) {
2557 fput_many(state->file, state->file_refs);
2558 state->file_refs = 0;
2559 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002560}
2561
2562/*
2563 * Get as many references to a file as we have IOs left in this submission,
2564 * assuming most submissions are for one file, or at least that each file
2565 * has more than one submission.
2566 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002567static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002568{
2569 if (!state)
2570 return fget(fd);
2571
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002572 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002573 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002574 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002575 return state->file;
2576 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002577 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002578 }
2579 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002580 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002581 return NULL;
2582
2583 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002584 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002585 return state->file;
2586}
2587
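/* true if there is no bdev, or its request queue supports NOWAIT issue */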
Jens Axboe4503b762020-06-01 10:00:27 -06002588static bool io_bdev_nowait(struct block_device *bdev)
2589{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002590 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002591}
2592
Jens Axboe2b188cc2019-01-07 10:46:33 -07002593/*
2594 * If we tracked the file through the SCM inflight mechanism, we could support
2595 * any file. For now, just ensure that anything potentially problematic is done
2596 * inline.
2597 */
Jens Axboe7b29f922021-03-12 08:30:14 -07002598static bool __io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002599{
2600 umode_t mode = file_inode(file)->i_mode;
2601
Jens Axboe4503b762020-06-01 10:00:27 -06002602 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002603 if (IS_ENABLED(CONFIG_BLOCK) &&
2604 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002605 return true;
2606 return false;
2607 }
Pavel Begunkov976517f2021-06-09 12:07:25 +01002608 if (S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002609 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002610 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002611 if (IS_ENABLED(CONFIG_BLOCK) &&
2612 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002613 file->f_op != &io_uring_fops)
2614 return true;
2615 return false;
2616 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002617
Jens Axboec5b85622020-06-09 19:23:05 -06002618 /* any ->read/write should understand O_NONBLOCK */
2619 if (file->f_flags & O_NONBLOCK)
2620 return true;
2621
Jens Axboeaf197f52020-04-28 13:15:06 -06002622 if (!(file->f_mode & FMODE_NOWAIT))
2623 return false;
2624
2625 if (rw == READ)
2626 return file->f_op->read_iter != NULL;
2627
2628 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002629}
2630
Jens Axboe7b29f922021-03-12 08:30:14 -07002631static bool io_file_supports_async(struct io_kiocb *req, int rw)
2632{
2633 if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
2634 return true;
2635 else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
2636 return true;
2637
2638 return __io_file_supports_async(req->file, rw);
2639}
2640
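/*
 * Common read/write preparation: translate SQE fields (offset, ioprio,
 * rw_flags, addr/len/buf_index) into the embedded kiocb and validate them.
 * For IOPOLL rings the request must be O_DIRECT and the file must provide
 * ->iopoll(); completion then goes through io_complete_rw_iopoll().
 */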
Pavel Begunkova88fc402020-09-30 22:57:53 +03002641static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002642{
Jens Axboedef596e2019-01-09 08:59:42 -07002643 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002644 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002645 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002646 unsigned ioprio;
2647 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002648
Jens Axboe7b29f922021-03-12 08:30:14 -07002649 if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002650 req->flags |= REQ_F_ISREG;
2651
Jens Axboe2b188cc2019-01-07 10:46:33 -07002652 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002653 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002654 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002655 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002656 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002657 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002658 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2659 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2660 if (unlikely(ret))
2661 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002662
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002663 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2664 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2665 req->flags |= REQ_F_NOWAIT;
2666
Jens Axboe2b188cc2019-01-07 10:46:33 -07002667 ioprio = READ_ONCE(sqe->ioprio);
2668 if (ioprio) {
2669 ret = ioprio_check_cap(ioprio);
2670 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002671 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002672
2673 kiocb->ki_ioprio = ioprio;
2674 } else
2675 kiocb->ki_ioprio = get_current_ioprio();
2676
Jens Axboedef596e2019-01-09 08:59:42 -07002677 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002678 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2679 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002680 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002681
Jens Axboedef596e2019-01-09 08:59:42 -07002682 kiocb->ki_flags |= IOCB_HIPRI;
2683 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002684 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002685 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002686 if (kiocb->ki_flags & IOCB_HIPRI)
2687 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002688 kiocb->ki_complete = io_complete_rw;
2689 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002690
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002691 if (req->opcode == IORING_OP_READ_FIXED ||
2692 req->opcode == IORING_OP_WRITE_FIXED) {
2693 req->imu = NULL;
2694 io_req_set_rsrc_node(req);
2695 }
2696
Jens Axboe3529d8c2019-12-19 18:24:38 -07002697 req->rw.addr = READ_ONCE(sqe->addr);
2698 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002699 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002700 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002701}
2702
2703static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2704{
2705 switch (ret) {
2706 case -EIOCBQUEUED:
2707 break;
2708 case -ERESTARTSYS:
2709 case -ERESTARTNOINTR:
2710 case -ERESTARTNOHAND:
2711 case -ERESTART_RESTARTBLOCK:
2712 /*
2713 * We can't just restart the syscall, since previously
2714 * submitted sqes may already be in progress. Just fail this
2715 * IO with EINTR.
2716 */
2717 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002718 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002719 default:
2720 kiocb->ki_complete(kiocb, ret, 0);
2721 }
2722}
2723
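/*
 * Finish a read/write kiocb: fold in any bytes completed by a previous
 * (partial) attempt, update f_pos for REQ_F_CUR_POS requests, then either
 * complete the request or, if it was marked REQ_F_REISSUE, re-queue it to
 * the async workers (failing it if re-preparation isn't possible).
 */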
Jens Axboea1d7c392020-06-22 11:09:46 -06002724static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002725 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002726{
Jens Axboeba042912019-12-25 16:33:42 -07002727 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002728 struct io_async_rw *io = req->async_data;
Pavel Begunkov97284632021-04-08 19:28:03 +01002729 bool check_reissue = kiocb->ki_complete == io_complete_rw;
Jens Axboeba042912019-12-25 16:33:42 -07002730
Jens Axboe227c0c92020-08-13 11:51:40 -06002731 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002732 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002733 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002734 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002735 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002736 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002737 }
2738
Jens Axboeba042912019-12-25 16:33:42 -07002739 if (req->flags & REQ_F_CUR_POS)
2740 req->file->f_pos = kiocb->ki_pos;
Hao Xue149bd742021-06-28 05:48:05 +08002741 if (ret >= 0 && check_reissue)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002742 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002743 else
2744 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002745
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01002746 if (check_reissue && (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov97284632021-04-08 19:28:03 +01002747 req->flags &= ~REQ_F_REISSUE;
Jens Axboea7be7c22021-04-15 11:31:14 -06002748 if (io_resubmit_prep(req)) {
Pavel Begunkov8c130822021-03-22 01:58:32 +00002749 req_ref_get(req);
2750 io_queue_async_work(req);
2751 } else {
Pavel Begunkov97284632021-04-08 19:28:03 +01002752 int cflags = 0;
2753
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01002754 req_set_fail(req);
Pavel Begunkov97284632021-04-08 19:28:03 +01002755 if (req->flags & REQ_F_BUFFER_SELECTED)
2756 cflags = io_put_rw_kbuf(req);
2757 __io_req_complete(req, issue_flags, ret, cflags);
2758 }
2759 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002760}
2761
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002762static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
2763 struct io_mapped_ubuf *imu)
Jens Axboeedafcce2019-01-09 09:16:05 -07002764{
Jens Axboe9adbd452019-12-20 08:45:55 -07002765 size_t len = req->rw.len;
Pavel Begunkov75769e32021-04-01 15:43:54 +01002766 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002767 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07002768
Pavel Begunkov75769e32021-04-01 15:43:54 +01002769 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07002770 return -EFAULT;
2771 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01002772 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07002773 return -EFAULT;
2774
2775 /*
2776	 * We may not be at the start of the buffer; set the size appropriately
2777	 * and advance to the beginning.
2778 */
2779 offset = buf_addr - imu->ubuf;
2780 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002781
2782 if (offset) {
2783 /*
2784 * Don't use iov_iter_advance() here, as it's really slow for
2785 * using the latter parts of a big fixed buffer - it iterates
2786 * over each segment manually. We can cheat a bit here, because
2787 * we know that:
2788 *
2789 * 1) it's a BVEC iter, we set it up
2790 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2791 * first and last bvec
2792 *
2793 * So just find our index, and adjust the iterator afterwards.
2794 * If the offset is within the first bvec (or the whole first
2795		 * bvec), just use iov_iter_advance(). This makes it easier
2796 * since we can just skip the first segment, which may not
2797 * be PAGE_SIZE aligned.
2798 */
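		/*
		 * Illustrative example (assuming 4K pages and a page-sized
		 * first bvec): for offset 10000 we take the else branch below,
		 * offset becomes 10000 - 4096 = 5904, seg_skip = 1 +
		 * (5904 >> 12) = 2, and iov_offset = 5904 & 4095 = 1808,
		 * i.e. we skip two full bvecs plus 1808 bytes into the third.
		 */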
2799 const struct bio_vec *bvec = imu->bvec;
2800
2801 if (offset <= bvec->bv_len) {
2802 iov_iter_advance(iter, offset);
2803 } else {
2804 unsigned long seg_skip;
2805
2806 /* skip first vec */
2807 offset -= bvec->bv_len;
2808 seg_skip = 1 + (offset >> PAGE_SHIFT);
2809
2810 iter->bvec = bvec + seg_skip;
2811 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002812 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002813 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002814 }
2815 }
2816
Pavel Begunkov847595d2021-02-04 13:52:06 +00002817 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002818}
2819
Pavel Begunkoveae071c2021-04-25 14:32:24 +01002820static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
2821{
2822 struct io_ring_ctx *ctx = req->ctx;
2823 struct io_mapped_ubuf *imu = req->imu;
2824 u16 index, buf_index = req->buf_index;
2825
2826 if (likely(!imu)) {
2827 if (unlikely(buf_index >= ctx->nr_user_bufs))
2828 return -EFAULT;
2829 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2830 imu = READ_ONCE(ctx->user_bufs[index]);
2831 req->imu = imu;
2832 }
2833 return __io_import_fixed(req, rw, iter, imu);
2834}
2835
Jens Axboebcda7ba2020-02-23 16:42:51 -07002836static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2837{
2838 if (needs_lock)
2839 mutex_unlock(&ctx->uring_lock);
2840}
2841
2842static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2843{
2844 /*
2845 * "Normal" inline submissions always hold the uring_lock, since we
2846 * grab it from the system call. Same is true for the SQPOLL offload.
2847 * The only exception is when we've detached the request and issue it
2848	 * from an async worker thread; grab the lock for that case.
2849 */
2850 if (needs_lock)
2851 mutex_lock(&ctx->uring_lock);
2852}
2853
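/*
 * Pick a buffer from the provided-buffer group 'bgid'. Groups live in the
 * ctx->io_buffers xarray; we hand out the last buffer on the group's list
 * (or the head itself, erasing the group once it's empty), and clamp *len
 * to the selected buffer's size.
 *
 * Illustrative userspace flow (a sketch using the raw UAPI): the app first
 * supplies buffers with IORING_OP_PROVIDE_BUFFERS for group 'bgid', then
 * submits e.g. a read with sqe->flags |= IOSQE_BUFFER_SELECT and
 * sqe->buf_group = bgid; the chosen buffer ID comes back in cqe->flags.
 */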
2854static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2855 int bgid, struct io_buffer *kbuf,
2856 bool needs_lock)
2857{
2858 struct io_buffer *head;
2859
2860 if (req->flags & REQ_F_BUFFER_SELECTED)
2861 return kbuf;
2862
2863 io_ring_submit_lock(req->ctx, needs_lock);
2864
2865 lockdep_assert_held(&req->ctx->uring_lock);
2866
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002867 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002868 if (head) {
2869 if (!list_empty(&head->list)) {
2870 kbuf = list_last_entry(&head->list, struct io_buffer,
2871 list);
2872 list_del(&kbuf->list);
2873 } else {
2874 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002875 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002876 }
2877 if (*len > kbuf->len)
2878 *len = kbuf->len;
2879 } else {
2880 kbuf = ERR_PTR(-ENOBUFS);
2881 }
2882
2883 io_ring_submit_unlock(req->ctx, needs_lock);
2884
2885 return kbuf;
2886}
2887
Jens Axboe4d954c22020-02-27 07:31:19 -07002888static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2889 bool needs_lock)
2890{
2891 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002892 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002893
2894 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002895 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002896 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2897 if (IS_ERR(kbuf))
2898 return kbuf;
2899 req->rw.addr = (u64) (unsigned long) kbuf;
2900 req->flags |= REQ_F_BUFFER_SELECTED;
2901 return u64_to_user_ptr(kbuf->addr);
2902}
2903
2904#ifdef CONFIG_COMPAT
2905static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2906 bool needs_lock)
2907{
2908 struct compat_iovec __user *uiov;
2909 compat_ssize_t clen;
2910 void __user *buf;
2911 ssize_t len;
2912
2913 uiov = u64_to_user_ptr(req->rw.addr);
2914 if (!access_ok(uiov, sizeof(*uiov)))
2915 return -EFAULT;
2916 if (__get_user(clen, &uiov->iov_len))
2917 return -EFAULT;
2918 if (clen < 0)
2919 return -EINVAL;
2920
2921 len = clen;
2922 buf = io_rw_buffer_select(req, &len, needs_lock);
2923 if (IS_ERR(buf))
2924 return PTR_ERR(buf);
2925 iov[0].iov_base = buf;
2926 iov[0].iov_len = (compat_size_t) len;
2927 return 0;
2928}
2929#endif
2930
2931static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2932 bool needs_lock)
2933{
2934 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2935 void __user *buf;
2936 ssize_t len;
2937
2938 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2939 return -EFAULT;
2940
2941 len = iov[0].iov_len;
2942 if (len < 0)
2943 return -EINVAL;
2944 buf = io_rw_buffer_select(req, &len, needs_lock);
2945 if (IS_ERR(buf))
2946 return PTR_ERR(buf);
2947 iov[0].iov_base = buf;
2948 iov[0].iov_len = len;
2949 return 0;
2950}
2951
2952static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2953 bool needs_lock)
2954{
Jens Axboedddb3e22020-06-04 11:27:01 -06002955 if (req->flags & REQ_F_BUFFER_SELECTED) {
2956 struct io_buffer *kbuf;
2957
2958 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2959 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2960 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07002961 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06002962 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00002963 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07002964 return -EINVAL;
2965
2966#ifdef CONFIG_COMPAT
2967 if (req->ctx->compat)
2968 return io_compat_import(req, iov, needs_lock);
2969#endif
2970
2971 return __io_iov_buffer_select(req, iov, needs_lock);
2972}
2973
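/*
 * Set up the iov_iter for a read/write request: registered (fixed) buffers
 * map through io_import_fixed(), IORING_OP_READ/WRITE use a single range
 * (optionally picked from a provided-buffer group), and everything else is
 * imported as a user iovec array, honoring compat tasks.
 */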
Pavel Begunkov847595d2021-02-04 13:52:06 +00002974static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
2975 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002976{
Jens Axboe9adbd452019-12-20 08:45:55 -07002977 void __user *buf = u64_to_user_ptr(req->rw.addr);
2978 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00002979 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07002980 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07002981
Pavel Begunkov7d009162019-11-25 23:14:40 +03002982 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002983 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07002984 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07002985 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002986
Jens Axboebcda7ba2020-02-23 16:42:51 -07002987 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002988 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07002989 return -EINVAL;
2990
Jens Axboe3a6820f2019-12-22 15:19:35 -07002991 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002992 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07002993 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03002994 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07002995 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06002996 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002997 }
2998
Jens Axboe3a6820f2019-12-22 15:19:35 -07002999 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3000 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003001 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003002 }
3003
Jens Axboe4d954c22020-02-27 07:31:19 -07003004 if (req->flags & REQ_F_BUFFER_SELECT) {
3005 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003006 if (!ret)
3007 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003008 *iovec = NULL;
3009 return ret;
3010 }
3011
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003012 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3013 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003014}
3015
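/* stream-like files (pipes, sockets) have no position, so pass a NULL ppos */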
Jens Axboe0fef9482020-08-26 10:36:20 -06003016static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3017{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003018 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003019}
3020
Jens Axboe32960612019-09-23 11:05:34 -06003021/*
3022 * For files that don't have ->read_iter() and ->write_iter(), handle them
3023 * by looping over ->read() or ->write() manually.
3024 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003025static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003026{
Jens Axboe4017eb92020-10-22 14:14:12 -06003027 struct kiocb *kiocb = &req->rw.kiocb;
3028 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003029 ssize_t ret = 0;
3030
3031 /*
3032 * Don't support polled IO through this interface, and we can't
3033 * support non-blocking either. For the latter, this just causes
3034 * the kiocb to be handled from an async context.
3035 */
3036 if (kiocb->ki_flags & IOCB_HIPRI)
3037 return -EOPNOTSUPP;
3038 if (kiocb->ki_flags & IOCB_NOWAIT)
3039 return -EAGAIN;
3040
3041 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003042 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003043 ssize_t nr;
3044
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003045 if (!iov_iter_is_bvec(iter)) {
3046 iovec = iov_iter_iovec(iter);
3047 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003048 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3049 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003050 }
3051
Jens Axboe32960612019-09-23 11:05:34 -06003052 if (rw == READ) {
3053 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003054 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003055 } else {
3056 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003057 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003058 }
3059
3060 if (nr < 0) {
3061 if (!ret)
3062 ret = nr;
3063 break;
3064 }
3065 ret += nr;
3066 if (nr != iovec.iov_len)
3067 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003068 req->rw.len -= nr;
3069 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003070 iov_iter_advance(iter, nr);
3071 }
3072
3073 return ret;
3074}
3075
Jens Axboeff6165b2020-08-13 09:47:43 -06003076static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3077 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003078{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003079 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003080
Jens Axboeff6165b2020-08-13 09:47:43 -06003081 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003082 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003083 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003084 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003085 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003086 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003087 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003088 unsigned iov_off = 0;
3089
3090 rw->iter.iov = rw->fast_iov;
3091 if (iter->iov != fast_iov) {
3092 iov_off = iter->iov - fast_iov;
3093 rw->iter.iov += iov_off;
3094 }
3095 if (rw->fast_iov != fast_iov)
3096 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003097 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003098 } else {
3099 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003100 }
3101}
3102
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003103static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003104{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003105 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3106 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3107 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003108}
3109
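/*
 * Stash the iovec/iter state in the request's async data so a punted or
 * retried request can resume with the same iterator. This is a no-op
 * unless the opcode needs async setup or the caller forces it.
 */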
Jens Axboeff6165b2020-08-13 09:47:43 -06003110static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3111 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003112 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003113{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003114 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003115 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003116 if (!req->async_data) {
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003117 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003118 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003119 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003120 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003121
Jens Axboeff6165b2020-08-13 09:47:43 -06003122 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003123 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003124 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003125}
3126
Pavel Begunkov73debe62020-09-30 22:57:54 +03003127static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003128{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003129 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003130 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003131 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003132
Pavel Begunkov2846c482020-11-07 13:16:27 +00003133 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003134 if (unlikely(ret < 0))
3135 return ret;
3136
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003137 iorw->bytes_done = 0;
3138 iorw->free_iovec = iov;
3139 if (iov)
3140 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003141 return 0;
3142}
3143
Pavel Begunkov73debe62020-09-30 22:57:54 +03003144static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003145{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003146 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3147 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003148 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003149}
3150
Jens Axboec1dd91d2020-08-03 16:43:59 -06003151/*
3152 * This is our waitqueue callback handler, registered through lock_page_async()
3153 * when we initially tried to do the IO with the iocb that armed our waitqueue.
3154 * This gets called when the page is unlocked, and we generally expect that to
3155 * happen when the page IO is completed and the page is now uptodate. This will
3156 * queue a task_work based retry of the operation, attempting to copy the data
3157 * again. If the latter fails because the page was NOT uptodate, then we will
3158 * do a thread based blocking retry of the operation. That's the unexpected
3159 * slow path.
3160 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003161static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3162 int sync, void *arg)
3163{
3164 struct wait_page_queue *wpq;
3165 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003166 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003167
3168 wpq = container_of(wait, struct wait_page_queue, wait);
3169
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003170 if (!wake_page_match(wpq, key))
3171 return 0;
3172
Hao Xuc8d317a2020-09-29 20:00:45 +08003173 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003174 list_del_init(&wait->entry);
3175
Jens Axboebcf5a062020-05-22 09:24:42 -06003176 /* submit ref gets dropped, acquire a new one */
Jens Axboede9b4cc2021-02-24 13:28:27 -07003177 req_ref_get(req);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003178 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003179 return 1;
3180}
3181
Jens Axboec1dd91d2020-08-03 16:43:59 -06003182/*
3183 * This controls whether a given IO request should be armed for async page
3184 * based retry. If we return false here, the request is handed to the async
3185 * worker threads for retry. If we're doing buffered reads on a regular file,
3186 * we prepare a private wait_page_queue entry and retry the operation. This
3187 * will either succeed because the page is now uptodate and unlocked, or it
3188 * will register a callback when the page is unlocked at IO completion. Through
3189 * that callback, io_uring uses task_work to setup a retry of the operation.
3190 * That retry will attempt the buffered read again. The retry will generally
3191 * succeed, or in rare cases where it fails, we then fall back to using the
3192 * async worker threads for a blocking retry.
3193 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003194static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003195{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003196 struct io_async_rw *rw = req->async_data;
3197 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003198 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003199
3200 /* never retry for NOWAIT, we just complete with -EAGAIN */
3201 if (req->flags & REQ_F_NOWAIT)
3202 return false;
3203
Jens Axboe227c0c92020-08-13 11:51:40 -06003204 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003205 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003206 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003207
Jens Axboebcf5a062020-05-22 09:24:42 -06003208 /*
3209 * just use poll if we can, and don't attempt if the fs doesn't
3210 * support callback based unlocks
3211 */
3212 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3213 return false;
3214
Jens Axboe3b2a4432020-08-16 10:58:43 -07003215 wait->wait.func = io_async_buf_func;
3216 wait->wait.private = req;
3217 wait->wait.flags = 0;
3218 INIT_LIST_HEAD(&wait->wait.entry);
3219 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003220 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003221 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003222 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003223}
3224
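/* prefer ->read_iter(), fall back to looping ->read(), else -EINVAL */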
Pavel Begunkovaeab9502021-06-14 02:36:24 +01003225static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
Jens Axboebcf5a062020-05-22 09:24:42 -06003226{
3227 if (req->file->f_op->read_iter)
3228 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003229 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003230 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003231 else
3232 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003233}
3234
Pavel Begunkov889fca72021-02-10 00:03:09 +00003235static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003236{
3237 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003238 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003239 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003240 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003241 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003242 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003243
Pavel Begunkov2846c482020-11-07 13:16:27 +00003244 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003245 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003246 iovec = NULL;
3247 } else {
3248 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3249 if (ret < 0)
3250 return ret;
3251 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003252 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003253 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003254
Jens Axboefd6c2e42019-12-18 12:19:41 -07003255 /* Ensure we clear previously set non-block flag */
3256 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003257 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003258 else
3259 kiocb->ki_flags |= IOCB_NOWAIT;
3260
Pavel Begunkov24c74672020-06-21 13:09:51 +03003261 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003262 if (force_nonblock && !io_file_supports_async(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003263 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003264 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003265 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003266
Pavel Begunkov632546c2020-11-07 13:16:26 +00003267 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003268 if (unlikely(ret)) {
3269 kfree(iovec);
3270 return ret;
3271 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003272
Jens Axboe227c0c92020-08-13 11:51:40 -06003273 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003274
Jens Axboe230d50d2021-04-01 20:41:15 -06003275 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003276 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003277 /* IOPOLL retry should happen for io-wq threads */
3278 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003279 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003280 /* no retry on NONBLOCK nor RWF_NOWAIT */
3281 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003282 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003283 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003284 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003285 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003286 } else if (ret == -EIOCBQUEUED) {
3287 goto out_free;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003288 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003289 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003290 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003291 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003292 }
3293
Jens Axboe227c0c92020-08-13 11:51:40 -06003294 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003295 if (ret2)
3296 return ret2;
3297
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003298 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003299 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003300 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003301 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003302
Pavel Begunkovb23df912021-02-04 13:52:04 +00003303 do {
3304 io_size -= ret;
3305 rw->bytes_done += ret;
3306 /* if we can retry, do so with the callbacks armed */
3307 if (!io_rw_should_retry(req)) {
3308 kiocb->ki_flags &= ~IOCB_WAITQ;
3309 return -EAGAIN;
3310 }
3311
3312 /*
3313 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3314 * we get -EIOCBQUEUED, then we'll get a notification when the
3315 * desired page gets unlocked. We can also get a partial read
3316 * here, and if we do, then just retry at the new offset.
3317 */
3318 ret = io_iter_do_read(req, iter);
3319 if (ret == -EIOCBQUEUED)
3320 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003321 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003322 kiocb->ki_flags &= ~IOCB_WAITQ;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003323 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003324done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003325 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003326out_free:
3327	/* it's faster to check here than to delegate to kfree */
3328 if (iovec)
3329 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003330 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003331}
3332
Pavel Begunkov73debe62020-09-30 22:57:54 +03003333static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003334{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003335 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3336 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003337 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003338}
3339
Pavel Begunkov889fca72021-02-10 00:03:09 +00003340static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003341{
3342 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003343 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003344 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003345 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003346 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003347 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003348
Pavel Begunkov2846c482020-11-07 13:16:27 +00003349 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003350 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003351 iovec = NULL;
3352 } else {
3353 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3354 if (ret < 0)
3355 return ret;
3356 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003357 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003358 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003359
Jens Axboefd6c2e42019-12-18 12:19:41 -07003360 /* Ensure we clear previously set non-block flag */
3361 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003362 kiocb->ki_flags &= ~IOCB_NOWAIT;
3363 else
3364 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003365
Pavel Begunkov24c74672020-06-21 13:09:51 +03003366 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003367 if (force_nonblock && !io_file_supports_async(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003368 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003369
Jens Axboe10d59342019-12-09 20:16:22 -07003370 /* file path doesn't support NOWAIT for non-direct_IO */
3371 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3372 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003373 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003374
Pavel Begunkov632546c2020-11-07 13:16:26 +00003375 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003376 if (unlikely(ret))
3377 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003378
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003379 /*
3380 * Open-code file_start_write here to grab freeze protection,
3381 * which will be released by another thread in
3382 * io_complete_rw(). Fool lockdep by telling it the lock got
3383 * released so that it doesn't complain about the held lock when
3384 * we return to userspace.
3385 */
3386 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003387 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003388 __sb_writers_release(file_inode(req->file)->i_sb,
3389 SB_FREEZE_WRITE);
3390 }
3391 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003392
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003393 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003394 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003395 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003396 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003397 else
3398 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003399
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003400 if (req->flags & REQ_F_REISSUE) {
3401 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003402 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003403 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003404
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003405 /*
3406 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3407 * retry them without IOCB_NOWAIT.
3408 */
3409 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3410 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003411	/* no retry on NONBLOCK or RWF_NOWAIT */
3412 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003413 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003414 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003415 /* IOPOLL retry should happen for io-wq threads */
3416 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3417 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003418done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003419 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003420 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003421copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003422 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003423 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003424 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003425 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003426 }
Jens Axboe31b51512019-01-18 22:56:34 -07003427out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003428 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003429 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003430 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003431 return ret;
3432}
3433
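/*
 * Userspace sketch (not part of this kernel file): a minimal liburing
 * program queueing the write that io_write() above services. Assumes
 * liburing is installed; the path, length and error handling are purely
 * illustrative.
 */
#include <fcntl.h>
#include <liburing.h>

static int example_write(const char *path, const void *buf, unsigned int len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	fd = open(path, O_WRONLY | O_CREAT, 0644);
	if (fd < 0)
		return -1;
	if (io_uring_queue_init(8, &ring, 0))
		return -1;

	sqe = io_uring_get_sqe(&ring);
	/* write 'len' bytes at offset 0; the kernel punts to io-wq if NOWAIT fails */
	io_uring_prep_write(sqe, fd, buf, len, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	ret = cqe->res;			/* bytes written, or -errno */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return ret;
}
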
Jens Axboe80a261f2020-09-28 14:23:58 -06003434static int io_renameat_prep(struct io_kiocb *req,
3435 const struct io_uring_sqe *sqe)
3436{
3437 struct io_rename *ren = &req->rename;
3438 const char __user *oldf, *newf;
3439
Jens Axboeed7eb252021-06-23 09:04:13 -06003440 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3441 return -EINVAL;
3442 if (sqe->ioprio || sqe->buf_index)
3443 return -EINVAL;
Jens Axboe80a261f2020-09-28 14:23:58 -06003444 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3445 return -EBADF;
3446
3447 ren->old_dfd = READ_ONCE(sqe->fd);
3448 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3449 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3450 ren->new_dfd = READ_ONCE(sqe->len);
3451 ren->flags = READ_ONCE(sqe->rename_flags);
3452
3453 ren->oldpath = getname(oldf);
3454 if (IS_ERR(ren->oldpath))
3455 return PTR_ERR(ren->oldpath);
3456
3457 ren->newpath = getname(newf);
3458 if (IS_ERR(ren->newpath)) {
3459 putname(ren->oldpath);
3460 return PTR_ERR(ren->newpath);
3461 }
3462
3463 req->flags |= REQ_F_NEED_CLEANUP;
3464 return 0;
3465}
3466
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003467static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003468{
3469 struct io_rename *ren = &req->rename;
3470 int ret;
3471
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003472 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003473 return -EAGAIN;
3474
3475 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3476 ren->newpath, ren->flags);
3477
3478 req->flags &= ~REQ_F_NEED_CLEANUP;
3479 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003480 req_set_fail(req);
Jens Axboe80a261f2020-09-28 14:23:58 -06003481 io_req_complete(req, ret);
3482 return 0;
3483}
3484
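/*
 * Userspace sketch (not part of this kernel file): IORING_OP_RENAMEAT uses
 * the SQE fields decoded in io_renameat_prep() above (fd = old dirfd,
 * addr = old path, len = new dirfd, addr2 = new path, rename_flags).
 * Assumes liburing provides io_uring_prep_renameat() and a ring set up as
 * in the earlier write sketch; the paths are illustrative.
 */
static void example_prep_rename(struct io_uring_sqe *sqe)
{
	/* always executed from io-wq, see the -EAGAIN return for NONBLOCK above */
	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt", AT_FDCWD, "new.txt", 0);
}
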
Jens Axboe14a11432020-09-28 14:27:37 -06003485static int io_unlinkat_prep(struct io_kiocb *req,
3486 const struct io_uring_sqe *sqe)
3487{
3488 struct io_unlink *un = &req->unlink;
3489 const char __user *fname;
3490
Jens Axboe22634bc2021-06-23 09:07:45 -06003491 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3492 return -EINVAL;
3493 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3494 return -EINVAL;
Jens Axboe14a11432020-09-28 14:27:37 -06003495 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3496 return -EBADF;
3497
3498 un->dfd = READ_ONCE(sqe->fd);
3499
3500 un->flags = READ_ONCE(sqe->unlink_flags);
3501 if (un->flags & ~AT_REMOVEDIR)
3502 return -EINVAL;
3503
3504 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3505 un->filename = getname(fname);
3506 if (IS_ERR(un->filename))
3507 return PTR_ERR(un->filename);
3508
3509 req->flags |= REQ_F_NEED_CLEANUP;
3510 return 0;
3511}
3512
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003513static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003514{
3515 struct io_unlink *un = &req->unlink;
3516 int ret;
3517
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003518 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003519 return -EAGAIN;
3520
3521 if (un->flags & AT_REMOVEDIR)
3522 ret = do_rmdir(un->dfd, un->filename);
3523 else
3524 ret = do_unlinkat(un->dfd, un->filename);
3525
3526 req->flags &= ~REQ_F_NEED_CLEANUP;
3527 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003528 req_set_fail(req);
Jens Axboe14a11432020-09-28 14:27:37 -06003529 io_req_complete(req, ret);
3530 return 0;
3531}
3532
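/*
 * Userspace sketch (not part of this kernel file): IORING_OP_UNLINKAT only
 * accepts AT_REMOVEDIR in unlink_flags, mirroring the check in
 * io_unlinkat_prep() above. Assumes liburing provides
 * io_uring_prep_unlinkat(); the paths are illustrative.
 */
static void example_prep_unlink(struct io_uring_sqe *sqe, int is_dir)
{
	io_uring_prep_unlinkat(sqe, AT_FDCWD, is_dir ? "tmpdir" : "tmp.txt",
			       is_dir ? AT_REMOVEDIR : 0);
}
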
Jens Axboe36f4fa62020-09-05 11:14:22 -06003533static int io_shutdown_prep(struct io_kiocb *req,
3534 const struct io_uring_sqe *sqe)
3535{
3536#if defined(CONFIG_NET)
3537 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3538 return -EINVAL;
3539 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3540 sqe->buf_index)
3541 return -EINVAL;
3542
3543 req->shutdown.how = READ_ONCE(sqe->len);
3544 return 0;
3545#else
3546 return -EOPNOTSUPP;
3547#endif
3548}
3549
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003550static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003551{
3552#if defined(CONFIG_NET)
3553 struct socket *sock;
3554 int ret;
3555
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003556 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003557 return -EAGAIN;
3558
Linus Torvalds48aba792020-12-16 12:44:05 -08003559 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003560 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003561 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003562
3563 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003564 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003565 req_set_fail(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003566 io_req_complete(req, ret);
3567 return 0;
3568#else
3569 return -EOPNOTSUPP;
3570#endif
3571}
3572
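/*
 * Userspace sketch (not part of this kernel file): IORING_OP_SHUTDOWN takes
 * the shutdown 'how' in sqe->len, as read by io_shutdown_prep() above.
 * Assumes liburing provides io_uring_prep_shutdown() and that sockfd is an
 * already connected socket.
 */
static void example_prep_shutdown(struct io_uring_sqe *sqe, int sockfd)
{
	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);	/* stop sending */
}
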
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003573static int __io_splice_prep(struct io_kiocb *req,
3574 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003575{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01003576 struct io_splice *sp = &req->splice;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003577 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003578
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003579 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3580 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003581
3582 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003583 sp->len = READ_ONCE(sqe->len);
3584 sp->flags = READ_ONCE(sqe->splice_flags);
3585
3586 if (unlikely(sp->flags & ~valid_flags))
3587 return -EINVAL;
3588
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003589 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3590 (sp->flags & SPLICE_F_FD_IN_FIXED));
3591 if (!sp->file_in)
3592 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003593 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003594 return 0;
3595}
3596
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003597static int io_tee_prep(struct io_kiocb *req,
3598 const struct io_uring_sqe *sqe)
3599{
3600 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3601 return -EINVAL;
3602 return __io_splice_prep(req, sqe);
3603}
3604
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003605static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003606{
3607 struct io_splice *sp = &req->splice;
3608 struct file *in = sp->file_in;
3609 struct file *out = sp->file_out;
3610 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3611 long ret = 0;
3612
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003613 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003614 return -EAGAIN;
3615 if (sp->len)
3616 ret = do_tee(in, out, sp->len, flags);
3617
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003618 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3619 io_put_file(in);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003620 req->flags &= ~REQ_F_NEED_CLEANUP;
3621
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003622 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003623 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003624 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003625 return 0;
3626}
3627
3628static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3629{
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01003630 struct io_splice *sp = &req->splice;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003631
3632 sp->off_in = READ_ONCE(sqe->splice_off_in);
3633 sp->off_out = READ_ONCE(sqe->off);
3634 return __io_splice_prep(req, sqe);
3635}
3636
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003637static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003638{
3639 struct io_splice *sp = &req->splice;
3640 struct file *in = sp->file_in;
3641 struct file *out = sp->file_out;
3642 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3643 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003644 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003645
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003646 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003647 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003648
3649 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3650 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003651
Jens Axboe948a7742020-05-17 14:21:38 -06003652 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003653 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003654
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003655 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3656 io_put_file(in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003657 req->flags &= ~REQ_F_NEED_CLEANUP;
3658
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003659 if (ret != sp->len)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003660 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003661 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003662 return 0;
3663}
3664
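/*
 * Userspace sketch (not part of this kernel file): IORING_OP_SPLICE with
 * off_in/off_out of -1 uses the files' current positions, matching the
 * poff_in/poff_out handling in io_splice() above. Assumes liburing provides
 * io_uring_prep_splice(); the descriptors and length are illustrative.
 */
static void example_prep_splice(struct io_uring_sqe *sqe, int pipe_rd, int out_fd)
{
	io_uring_prep_splice(sqe, pipe_rd, -1, out_fd, -1, 4096, 0);
}
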
Jens Axboe2b188cc2019-01-07 10:46:33 -07003665/*
3666 * IORING_OP_NOP just posts a completion event, nothing else.
3667 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003668static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003669{
3670 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003671
Jens Axboedef596e2019-01-09 08:59:42 -07003672 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3673 return -EINVAL;
3674
Pavel Begunkov889fca72021-02-10 00:03:09 +00003675 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003676 return 0;
3677}
3678
Pavel Begunkov1155c762021-02-18 18:29:38 +00003679static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003680{
Jens Axboe6b063142019-01-10 22:13:58 -07003681 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003682
Jens Axboe09bb8392019-03-13 12:39:28 -06003683 if (!req->file)
3684 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003685
Jens Axboe6b063142019-01-10 22:13:58 -07003686 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003687 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003688 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003689 return -EINVAL;
3690
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003691 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3692 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3693 return -EINVAL;
3694
3695 req->sync.off = READ_ONCE(sqe->off);
3696 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003697 return 0;
3698}
3699
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003700static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003701{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003702 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003703 int ret;
3704
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003705 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003706 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003707 return -EAGAIN;
3708
Jens Axboe9adbd452019-12-20 08:45:55 -07003709 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003710 end > 0 ? end : LLONG_MAX,
3711 req->sync.flags & IORING_FSYNC_DATASYNC);
3712 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003713 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003714 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003715 return 0;
3716}
3717
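/*
 * Userspace sketch (not part of this kernel file): a common pattern is to
 * link a write to an fsync so the fsync only runs after the write completes.
 * Assumes a ring set up as in the earlier write sketch; uses liburing's
 * io_uring_prep_fsync() and the IOSQE_IO_LINK flag.
 */
static void example_write_then_fsync(struct io_uring *ring, int fd,
				     const void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, fd, buf, len, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);	/* chain to the next SQE */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(ring);
}
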
Jens Axboed63d1b52019-12-10 10:38:56 -07003718static int io_fallocate_prep(struct io_kiocb *req,
3719 const struct io_uring_sqe *sqe)
3720{
3721 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3722 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003723 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3724 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003725
3726 req->sync.off = READ_ONCE(sqe->off);
3727 req->sync.len = READ_ONCE(sqe->addr);
3728 req->sync.mode = READ_ONCE(sqe->len);
3729 return 0;
3730}
3731
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003732static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003733{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003734 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003735
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003736	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003737 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003738 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003739 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3740 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003741 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003742 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003743 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003744 return 0;
3745}
3746
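/*
 * Userspace sketch (not part of this kernel file): IORING_OP_FALLOCATE packs
 * the mode into sqe->len and the length into sqe->addr, as io_fallocate_prep()
 * above shows. Assumes liburing provides io_uring_prep_fallocate(); the
 * values are illustrative.
 */
static void example_prep_fallocate(struct io_uring_sqe *sqe, int fd)
{
	/* preallocate 1 MiB at offset 0; always executed from io-wq */
	io_uring_prep_fallocate(sqe, fd, 0, 0, 1024 * 1024);
}
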
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003747static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003748{
Jens Axboef8748882020-01-08 17:47:02 -07003749 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003750 int ret;
3751
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003752 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003753 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003754 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003755 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003756
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003757	/* open.how should already be initialised */
3758 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003759 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003760
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003761 req->open.dfd = READ_ONCE(sqe->fd);
3762 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003763 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003764 if (IS_ERR(req->open.filename)) {
3765 ret = PTR_ERR(req->open.filename);
3766 req->open.filename = NULL;
3767 return ret;
3768 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003769 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003770 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003771 return 0;
3772}
3773
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003774static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3775{
3776 u64 flags, mode;
3777
Jens Axboe14587a462020-09-05 11:36:08 -06003778 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003779 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003780 mode = READ_ONCE(sqe->len);
3781 flags = READ_ONCE(sqe->open_flags);
3782 req->open.how = build_open_how(flags, mode);
3783 return __io_openat_prep(req, sqe);
3784}
3785
Jens Axboecebdb982020-01-08 17:59:24 -07003786static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3787{
3788 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003789 size_t len;
3790 int ret;
3791
Jens Axboe14587a462020-09-05 11:36:08 -06003792 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003793 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003794 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3795 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003796 if (len < OPEN_HOW_SIZE_VER0)
3797 return -EINVAL;
3798
3799 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3800 len);
3801 if (ret)
3802 return ret;
3803
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003804 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003805}
3806
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003807static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003808{
3809 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003810 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003811 bool nonblock_set;
3812 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003813 int ret;
3814
Jens Axboecebdb982020-01-08 17:59:24 -07003815 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003816 if (ret)
3817 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003818 nonblock_set = op.open_flag & O_NONBLOCK;
3819 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003820 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003821 /*
3822 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3823	 * it'll always return -EAGAIN
3824 */
3825 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3826 return -EAGAIN;
3827 op.lookup_flags |= LOOKUP_CACHED;
3828 op.open_flag |= O_NONBLOCK;
3829 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003830
Jens Axboe4022e7a2020-03-19 19:23:18 -06003831 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003832 if (ret < 0)
3833 goto err;
3834
3835 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003836 if (IS_ERR(file)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003837 /*
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003838 * We could hang on to this 'fd' on retrying, but seems like
3839 * marginal gain for something that is now known to be a slower
3840 * path. So just put it, and we'll get a new one when we retry.
Jens Axboe3a81fd02020-12-10 12:25:36 -07003841 */
3842 put_unused_fd(ret);
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003843
3844 ret = PTR_ERR(file);
3845 /* only retry if RESOLVE_CACHED wasn't already set by application */
3846 if (ret == -EAGAIN &&
3847 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
3848 return -EAGAIN;
3849 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003850 }
3851
Pavel Begunkov12dcb58a2021-06-24 15:10:00 +01003852 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
3853 file->f_flags &= ~O_NONBLOCK;
3854 fsnotify_open(file);
3855 fd_install(ret, file);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003856err:
3857 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003858 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003859 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003860 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01003861 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003862 return 0;
3863}
3864
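/*
 * Userspace sketch (not part of this kernel file): io_openat2() above first
 * tries a LOOKUP_CACHED/O_NONBLOCK open and retries from a worker on -EAGAIN,
 * unless the application itself asked for RESOLVE_CACHED. Assumes liburing
 * provides io_uring_prep_openat2(); struct open_how and RESOLVE_IN_ROOT come
 * from <linux/openat2.h>, and the path is illustrative.
 */
static void example_prep_openat2(struct io_uring_sqe *sqe, struct open_how *how)
{
	how->flags = O_RDONLY;
	how->mode = 0;
	how->resolve = RESOLVE_IN_ROOT;	/* confine resolution to the dirfd */
	io_uring_prep_openat2(sqe, AT_FDCWD, "data/file.txt", how);
}
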
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003865static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003866{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003867 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003868}
3869
Jens Axboe067524e2020-03-02 16:32:28 -07003870static int io_remove_buffers_prep(struct io_kiocb *req,
3871 const struct io_uring_sqe *sqe)
3872{
3873 struct io_provide_buf *p = &req->pbuf;
3874 u64 tmp;
3875
3876 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3877 return -EINVAL;
3878
3879 tmp = READ_ONCE(sqe->fd);
3880 if (!tmp || tmp > USHRT_MAX)
3881 return -EINVAL;
3882
3883 memset(p, 0, sizeof(*p));
3884 p->nbufs = tmp;
3885 p->bgid = READ_ONCE(sqe->buf_group);
3886 return 0;
3887}
3888
3889static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3890 int bgid, unsigned nbufs)
3891{
3892 unsigned i = 0;
3893
3894 /* shouldn't happen */
3895 if (!nbufs)
3896 return 0;
3897
3898 /* the head kbuf is the list itself */
3899 while (!list_empty(&buf->list)) {
3900 struct io_buffer *nxt;
3901
3902 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3903 list_del(&nxt->list);
3904 kfree(nxt);
3905 if (++i == nbufs)
3906 return i;
3907 }
3908 i++;
3909 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003910 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003911
3912 return i;
3913}
3914
Pavel Begunkov889fca72021-02-10 00:03:09 +00003915static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003916{
3917 struct io_provide_buf *p = &req->pbuf;
3918 struct io_ring_ctx *ctx = req->ctx;
3919 struct io_buffer *head;
3920 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003921 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003922
3923 io_ring_submit_lock(ctx, !force_nonblock);
3924
3925 lockdep_assert_held(&ctx->uring_lock);
3926
3927 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003928 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003929 if (head)
3930 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003931 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01003932 req_set_fail(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003933
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00003934 /* complete before unlock, IOPOLL may need the lock */
3935 __io_req_complete(req, issue_flags, ret, 0);
3936 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07003937 return 0;
3938}
3939
Jens Axboeddf0322d2020-02-23 16:41:33 -07003940static int io_provide_buffers_prep(struct io_kiocb *req,
3941 const struct io_uring_sqe *sqe)
3942{
Pavel Begunkov38134ad2021-04-15 13:07:39 +01003943 unsigned long size, tmp_check;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003944 struct io_provide_buf *p = &req->pbuf;
3945 u64 tmp;
3946
3947 if (sqe->ioprio || sqe->rw_flags)
3948 return -EINVAL;
3949
3950 tmp = READ_ONCE(sqe->fd);
3951 if (!tmp || tmp > USHRT_MAX)
3952 return -E2BIG;
3953 p->nbufs = tmp;
3954 p->addr = READ_ONCE(sqe->addr);
3955 p->len = READ_ONCE(sqe->len);
3956
Pavel Begunkov38134ad2021-04-15 13:07:39 +01003957 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
3958 &size))
3959 return -EOVERFLOW;
3960 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
3961 return -EOVERFLOW;
3962
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003963 size = (unsigned long)p->len * p->nbufs;
3964 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003965 return -EFAULT;
3966
3967 p->bgid = READ_ONCE(sqe->buf_group);
3968 tmp = READ_ONCE(sqe->off);
3969 if (tmp > USHRT_MAX)
3970 return -E2BIG;
3971 p->bid = tmp;
3972 return 0;
3973}
3974
3975static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3976{
3977 struct io_buffer *buf;
3978 u64 addr = pbuf->addr;
3979 int i, bid = pbuf->bid;
3980
3981 for (i = 0; i < pbuf->nbufs; i++) {
3982 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3983 if (!buf)
3984 break;
3985
3986 buf->addr = addr;
Thadeu Lima de Souza Cascardod1f82802021-05-05 09:47:06 -03003987 buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
Jens Axboeddf0322d2020-02-23 16:41:33 -07003988 buf->bid = bid;
3989 addr += pbuf->len;
3990 bid++;
3991 if (!*head) {
3992 INIT_LIST_HEAD(&buf->list);
3993 *head = buf;
3994 } else {
3995 list_add_tail(&buf->list, &(*head)->list);
3996 }
3997 }
3998
3999 return i ? i : -ENOMEM;
4000}
4001
Pavel Begunkov889fca72021-02-10 00:03:09 +00004002static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07004003{
4004 struct io_provide_buf *p = &req->pbuf;
4005 struct io_ring_ctx *ctx = req->ctx;
4006 struct io_buffer *head, *list;
4007 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004008 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004009
4010 io_ring_submit_lock(ctx, !force_nonblock);
4011
4012 lockdep_assert_held(&ctx->uring_lock);
4013
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004014 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004015
4016 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004017 if (ret >= 0 && !list) {
4018 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4019 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004020 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004021 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004022 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004023 req_set_fail(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004024 /* complete before unlock, IOPOLL may need the lock */
4025 __io_req_complete(req, issue_flags, ret, 0);
4026 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004027 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004028}
4029
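/*
 * Userspace sketch (not part of this kernel file): registering a buffer
 * group with IORING_OP_PROVIDE_BUFFERS and consuming it with a
 * buffer-select recv. Assumes liburing, a connected socket and a
 * caller-owned buffer pool; the group and buffer ids are illustrative.
 */
static void example_buffer_select(struct io_uring *ring, int sockfd,
				  void *pool, unsigned int buf_len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* hand 8 buffers of buf_len bytes to group 1, buffer ids from 0 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, pool, buf_len, 8, 1, 0);

	/* the recv picks any free buffer from group 1 */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, buf_len, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
	sqe->buf_group = 1;
	io_uring_submit(ring);

	for (int i = 0; i < 2; i++) {
		io_uring_wait_cqe(ring, &cqe);
		/* for the recv, cqe->flags >> IORING_CQE_BUFFER_SHIFT is the buffer id */
		io_uring_cqe_seen(ring, cqe);
	}
}
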
Jens Axboe3e4827b2020-01-08 15:18:09 -07004030static int io_epoll_ctl_prep(struct io_kiocb *req,
4031 const struct io_uring_sqe *sqe)
4032{
4033#if defined(CONFIG_EPOLL)
4034 if (sqe->ioprio || sqe->buf_index)
4035 return -EINVAL;
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004036 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004037 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004038
4039 req->epoll.epfd = READ_ONCE(sqe->fd);
4040 req->epoll.op = READ_ONCE(sqe->len);
4041 req->epoll.fd = READ_ONCE(sqe->off);
4042
4043 if (ep_op_has_event(req->epoll.op)) {
4044 struct epoll_event __user *ev;
4045
4046 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4047 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4048 return -EFAULT;
4049 }
4050
4051 return 0;
4052#else
4053 return -EOPNOTSUPP;
4054#endif
4055}
4056
Pavel Begunkov889fca72021-02-10 00:03:09 +00004057static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004058{
4059#if defined(CONFIG_EPOLL)
4060 struct io_epoll *ie = &req->epoll;
4061 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004062 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004063
4064 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4065 if (force_nonblock && ret == -EAGAIN)
4066 return -EAGAIN;
4067
4068 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004069 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004070 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004071 return 0;
4072#else
4073 return -EOPNOTSUPP;
4074#endif
4075}
4076
Jens Axboec1ca7572019-12-25 22:18:28 -07004077static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4078{
4079#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4080 if (sqe->ioprio || sqe->buf_index || sqe->off)
4081 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004082 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4083 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004084
4085 req->madvise.addr = READ_ONCE(sqe->addr);
4086 req->madvise.len = READ_ONCE(sqe->len);
4087 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4088 return 0;
4089#else
4090 return -EOPNOTSUPP;
4091#endif
4092}
4093
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004094static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004095{
4096#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4097 struct io_madvise *ma = &req->madvise;
4098 int ret;
4099
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004100 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004101 return -EAGAIN;
4102
Minchan Kim0726b012020-10-17 16:14:50 -07004103 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004104 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004105 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004106 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004107 return 0;
4108#else
4109 return -EOPNOTSUPP;
4110#endif
4111}
4112
Jens Axboe4840e412019-12-25 22:03:45 -07004113static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4114{
4115 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4116 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004117 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4118 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004119
4120 req->fadvise.offset = READ_ONCE(sqe->off);
4121 req->fadvise.len = READ_ONCE(sqe->len);
4122 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4123 return 0;
4124}
4125
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004126static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004127{
4128 struct io_fadvise *fa = &req->fadvise;
4129 int ret;
4130
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004131 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004132 switch (fa->advice) {
4133 case POSIX_FADV_NORMAL:
4134 case POSIX_FADV_RANDOM:
4135 case POSIX_FADV_SEQUENTIAL:
4136 break;
4137 default:
4138 return -EAGAIN;
4139 }
4140 }
Jens Axboe4840e412019-12-25 22:03:45 -07004141
4142 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4143 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004144 req_set_fail(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004145 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004146 return 0;
4147}
4148
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004149static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4150{
Pavel Begunkov2d74d042021-05-14 12:05:46 +01004151 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004152 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004153 if (sqe->ioprio || sqe->buf_index)
4154 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004155 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004156 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004157
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004158 req->statx.dfd = READ_ONCE(sqe->fd);
4159 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004160 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004161 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4162 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004163
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004164 return 0;
4165}
4166
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004167static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004168{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004169 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004170 int ret;
4171
Pavel Begunkov59d70012021-03-22 01:58:30 +00004172 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004173 return -EAGAIN;
4174
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004175 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4176 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004177
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004178 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004179 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004180 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004181 return 0;
4182}
4183
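/*
 * Userspace sketch (not part of this kernel file): IORING_OP_STATX mirrors
 * statx(2); io_statx() above always punts to io-wq since do_statx() may
 * block. Assumes liburing provides io_uring_prep_statx(); the struct statx
 * must stay valid until the request completes, and the path is illustrative.
 */
static void example_prep_statx(struct io_uring_sqe *sqe, struct statx *stx)
{
	io_uring_prep_statx(sqe, AT_FDCWD, "data/file.txt", 0,
			    STATX_SIZE | STATX_MTIME, stx);
}
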
Jens Axboeb5dba592019-12-11 14:02:38 -07004184static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4185{
Jens Axboe14587a462020-09-05 11:36:08 -06004186 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004187 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004188 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4189 sqe->rw_flags || sqe->buf_index)
4190 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004191 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004192 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004193
4194 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004195 return 0;
4196}
4197
Pavel Begunkov889fca72021-02-10 00:03:09 +00004198static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004199{
Jens Axboe9eac1902021-01-19 15:50:37 -07004200 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004201 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004202 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004203 struct file *file = NULL;
4204 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004205
Jens Axboe9eac1902021-01-19 15:50:37 -07004206 spin_lock(&files->file_lock);
4207 fdt = files_fdtable(files);
4208 if (close->fd >= fdt->max_fds) {
4209 spin_unlock(&files->file_lock);
4210 goto err;
4211 }
4212 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004213 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004214 spin_unlock(&files->file_lock);
4215 file = NULL;
4216 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004217 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004218
4219 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004220 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004221 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004222 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004223 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004224
Jens Axboe9eac1902021-01-19 15:50:37 -07004225 ret = __close_fd_get_file(close->fd, &file);
4226 spin_unlock(&files->file_lock);
4227 if (ret < 0) {
4228 if (ret == -ENOENT)
4229 ret = -EBADF;
4230 goto err;
4231 }
4232
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004233 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004234 ret = filp_close(file, current->files);
4235err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004236 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004237 req_set_fail(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004238 if (file)
4239 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004240 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004241 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004242}
4243
Pavel Begunkov1155c762021-02-18 18:29:38 +00004244static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004245{
4246 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004247
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004248 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4249 return -EINVAL;
4250 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4251 return -EINVAL;
4252
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004253 req->sync.off = READ_ONCE(sqe->off);
4254 req->sync.len = READ_ONCE(sqe->len);
4255 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004256 return 0;
4257}
4258
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004259static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004260{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004261 int ret;
4262
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004263 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004264 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004265 return -EAGAIN;
4266
Jens Axboe9adbd452019-12-20 08:45:55 -07004267 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004268 req->sync.flags);
4269 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004270 req_set_fail(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004271 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004272 return 0;
4273}
4274
YueHaibing469956e2020-03-04 15:53:52 +08004275#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004276static int io_setup_async_msg(struct io_kiocb *req,
4277 struct io_async_msghdr *kmsg)
4278{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004279 struct io_async_msghdr *async_msg = req->async_data;
4280
4281 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004282 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004283 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004284 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004285 return -ENOMEM;
4286 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004287 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004288 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004289 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004290 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004291	/* if we're using fast_iov, set it to the new one */
4292 if (!async_msg->free_iov)
4293 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4294
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004295 return -EAGAIN;
4296}
4297
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004298static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4299 struct io_async_msghdr *iomsg)
4300{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004301 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004302 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004303 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004304 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004305}
4306
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004307static int io_sendmsg_prep_async(struct io_kiocb *req)
4308{
4309 int ret;
4310
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004311 ret = io_sendmsg_copy_hdr(req, req->async_data);
4312 if (!ret)
4313 req->flags |= REQ_F_NEED_CLEANUP;
4314 return ret;
4315}
4316
Jens Axboe3529d8c2019-12-19 18:24:38 -07004317static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004318{
Jens Axboee47293f2019-12-20 08:58:21 -07004319 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004320
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004321 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4322 return -EINVAL;
4323
Pavel Begunkov270a5942020-07-12 20:41:04 +03004324 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004325 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004326 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4327 if (sr->msg_flags & MSG_DONTWAIT)
4328 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004329
Jens Axboed8768362020-02-27 14:17:49 -07004330#ifdef CONFIG_COMPAT
4331 if (req->ctx->compat)
4332 sr->msg_flags |= MSG_CMSG_COMPAT;
4333#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004334 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004335}
4336
Pavel Begunkov889fca72021-02-10 00:03:09 +00004337static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004338{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004339 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004340 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004341 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004342 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004343 int ret;
4344
Florent Revestdba4a922020-12-04 12:36:04 +01004345 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004346 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004347 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004348
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004349 kmsg = req->async_data;
4350 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004351 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004352 if (ret)
4353 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004354 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004355 }
4356
Pavel Begunkov04411802021-04-01 15:44:00 +01004357 flags = req->sr_msg.msg_flags;
4358 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004359 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004360 if (flags & MSG_WAITALL)
4361 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4362
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004363 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004364 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004365 return io_setup_async_msg(req, kmsg);
4366 if (ret == -ERESTARTSYS)
4367 ret = -EINTR;
4368
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004369 /* fast path, check for non-NULL to avoid function call */
4370 if (kmsg->free_iov)
4371 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004372 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004373 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004374 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004375 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004376 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004377}
4378
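/*
 * Userspace sketch (not part of this kernel file): IORING_OP_SENDMSG takes a
 * user msghdr via sqe->addr, copied by io_sendmsg_copy_hdr() above; the
 * msghdr and iovec must remain valid until the request completes. Assumes
 * liburing and a connected socket; the buffer is illustrative.
 */
static void example_prep_sendmsg(struct io_uring_sqe *sqe, int sockfd,
				 struct msghdr *msg, struct iovec *iov,
				 void *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	*msg = (struct msghdr){ .msg_iov = iov, .msg_iovlen = 1 };
	io_uring_prep_sendmsg(sqe, sockfd, msg, 0);
}
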
Pavel Begunkov889fca72021-02-10 00:03:09 +00004379static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004380{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004381 struct io_sr_msg *sr = &req->sr_msg;
4382 struct msghdr msg;
4383 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004384 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004385 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004386 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004387 int ret;
4388
Florent Revestdba4a922020-12-04 12:36:04 +01004389 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004390 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004391 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004392
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004393 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4394 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004395 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004396
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004397 msg.msg_name = NULL;
4398 msg.msg_control = NULL;
4399 msg.msg_controllen = 0;
4400 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004401
Pavel Begunkov04411802021-04-01 15:44:00 +01004402 flags = req->sr_msg.msg_flags;
4403 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004404 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004405 if (flags & MSG_WAITALL)
4406 min_ret = iov_iter_count(&msg.msg_iter);
4407
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004408 msg.msg_flags = flags;
4409 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004410 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004411 return -EAGAIN;
4412 if (ret == -ERESTARTSYS)
4413 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004414
Stefan Metzmacher00312752021-03-20 20:33:36 +01004415 if (ret < min_ret)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004416 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004417 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004418 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004419}
4420
Pavel Begunkov1400e692020-07-12 20:41:05 +03004421static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4422 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004423{
4424 struct io_sr_msg *sr = &req->sr_msg;
4425 struct iovec __user *uiov;
4426 size_t iov_len;
4427 int ret;
4428
Pavel Begunkov1400e692020-07-12 20:41:05 +03004429 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4430 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004431 if (ret)
4432 return ret;
4433
4434 if (req->flags & REQ_F_BUFFER_SELECT) {
4435 if (iov_len > 1)
4436 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004437 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004438 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004439 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004440 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004441 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004442 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004443 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004444 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004445 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004446 if (ret > 0)
4447 ret = 0;
4448 }
4449
4450 return ret;
4451}
4452
4453#ifdef CONFIG_COMPAT
4454static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004455 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004456{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004457 struct io_sr_msg *sr = &req->sr_msg;
4458 struct compat_iovec __user *uiov;
4459 compat_uptr_t ptr;
4460 compat_size_t len;
4461 int ret;
4462
Pavel Begunkov4af34172021-04-11 01:46:30 +01004463 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4464 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004465 if (ret)
4466 return ret;
4467
4468 uiov = compat_ptr(ptr);
4469 if (req->flags & REQ_F_BUFFER_SELECT) {
4470 compat_ssize_t clen;
4471
4472 if (len > 1)
4473 return -EINVAL;
4474 if (!access_ok(uiov, sizeof(*uiov)))
4475 return -EFAULT;
4476 if (__get_user(clen, &uiov->iov_len))
4477 return -EFAULT;
4478 if (clen < 0)
4479 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004480 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004481 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004482 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004483 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004484 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004485 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004486 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004487 if (ret < 0)
4488 return ret;
4489 }
4490
4491 return 0;
4492}
Jens Axboe03b12302019-12-02 18:50:25 -07004493#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004494
Pavel Begunkov1400e692020-07-12 20:41:05 +03004495static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4496 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004497{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004498 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004499
4500#ifdef CONFIG_COMPAT
4501 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004502 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004503#endif
4504
Pavel Begunkov1400e692020-07-12 20:41:05 +03004505 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004506}
4507
Jens Axboebcda7ba2020-02-23 16:42:51 -07004508static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004509 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004510{
4511 struct io_sr_msg *sr = &req->sr_msg;
4512 struct io_buffer *kbuf;
4513
Jens Axboebcda7ba2020-02-23 16:42:51 -07004514 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4515 if (IS_ERR(kbuf))
4516 return kbuf;
4517
4518 sr->kbuf = kbuf;
4519 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004520 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004521}
4522
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004523static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4524{
4525 return io_put_kbuf(req, req->sr_msg.kbuf);
4526}
4527
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004528static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004529{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004530 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004531
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004532 ret = io_recvmsg_copy_hdr(req, req->async_data);
4533 if (!ret)
4534 req->flags |= REQ_F_NEED_CLEANUP;
4535 return ret;
4536}
4537
4538static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4539{
4540 struct io_sr_msg *sr = &req->sr_msg;
4541
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004542 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4543 return -EINVAL;
4544
Pavel Begunkov270a5942020-07-12 20:41:04 +03004545 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004546 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004547 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01004548 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4549 if (sr->msg_flags & MSG_DONTWAIT)
4550 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004551
Jens Axboed8768362020-02-27 14:17:49 -07004552#ifdef CONFIG_COMPAT
4553 if (req->ctx->compat)
4554 sr->msg_flags |= MSG_CMSG_COMPAT;
4555#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004556 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004557}
4558
Pavel Begunkov889fca72021-02-10 00:03:09 +00004559static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004560{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004561 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004562 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004563 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004564 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004565 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004566 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004567 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004568
Florent Revestdba4a922020-12-04 12:36:04 +01004569 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004570 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004571 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004572
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004573 kmsg = req->async_data;
4574 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004575 ret = io_recvmsg_copy_hdr(req, &iomsg);
4576 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004577 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004578 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004579 }
4580
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004581 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004582 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004583 if (IS_ERR(kbuf))
4584 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004585 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004586 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4587 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004588 1, req->sr_msg.len);
4589 }
4590
Pavel Begunkov04411802021-04-01 15:44:00 +01004591 flags = req->sr_msg.msg_flags;
4592 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004593 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004594 if (flags & MSG_WAITALL)
4595 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4596
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004597 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4598 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004599 if (force_nonblock && ret == -EAGAIN)
4600 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004601 if (ret == -ERESTARTSYS)
4602 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004603
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004604 if (req->flags & REQ_F_BUFFER_SELECTED)
4605 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004606 /* fast path, check for non-NULL to avoid function call */
4607 if (kmsg->free_iov)
4608 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004609 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004610 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004611 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004612 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004613 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004614}
4615
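/*
 * Like io_recvmsg(), but for IORING_OP_RECV: a single-buffer receive with
 * no user msghdr, so the iovec and msghdr are built on the stack and
 * nothing needs to be carried across an async retry.
 */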
Pavel Begunkov889fca72021-02-10 00:03:09 +00004616static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004617{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004618 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004619 struct io_sr_msg *sr = &req->sr_msg;
4620 struct msghdr msg;
4621 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004622 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004623 struct iovec iov;
4624 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004625 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004626 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004627 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004628
Florent Revestdba4a922020-12-04 12:36:04 +01004629 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004630 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004631 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004632
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004633 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004634 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004635 if (IS_ERR(kbuf))
4636 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004637 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004638 }
4639
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004640 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004641 if (unlikely(ret))
4642 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004643
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004644 msg.msg_name = NULL;
4645 msg.msg_control = NULL;
4646 msg.msg_controllen = 0;
4647 msg.msg_namelen = 0;
4648 msg.msg_iocb = NULL;
4649 msg.msg_flags = 0;
4650
Pavel Begunkov04411802021-04-01 15:44:00 +01004651 flags = req->sr_msg.msg_flags;
4652 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004653 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004654 if (flags & MSG_WAITALL)
4655 min_ret = iov_iter_count(&msg.msg_iter);
4656
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004657 ret = sock_recvmsg(sock, &msg, flags);
4658 if (force_nonblock && ret == -EAGAIN)
4659 return -EAGAIN;
4660 if (ret == -ERESTARTSYS)
4661 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004662out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004663 if (req->flags & REQ_F_BUFFER_SELECTED)
4664 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01004665 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004666 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004667 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004668 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004669}
4670
Jens Axboe3529d8c2019-12-19 18:24:38 -07004671static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004672{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004673 struct io_accept *accept = &req->accept;
4674
Jens Axboe14587a462020-09-05 11:36:08 -06004675 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004676 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004677 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004678 return -EINVAL;
4679
Jens Axboed55e5f52019-12-11 16:12:15 -07004680 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4681 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004682 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004683 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004684 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004685}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004686
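/*
 * Issue an IORING_OP_ACCEPT via __sys_accept4_file(). In nonblocking mode
 * -EAGAIN is returned so the core can retry the request later instead of
 * failing it.
 */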
Pavel Begunkov889fca72021-02-10 00:03:09 +00004687static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004688{
4689 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004690 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004691 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004692 int ret;
4693
Jiufei Xuee697dee2020-06-10 13:41:59 +08004694 if (req->file->f_flags & O_NONBLOCK)
4695 req->flags |= REQ_F_NOWAIT;
4696
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004697 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004698 accept->addr_len, accept->flags,
4699 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004700 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004701 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004702 if (ret < 0) {
4703 if (ret == -ERESTARTSYS)
4704 ret = -EINTR;
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004705 req_set_fail(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004706 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004707 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004708 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004709}
4710
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004711static int io_connect_prep_async(struct io_kiocb *req)
4712{
4713 struct io_async_connect *io = req->async_data;
4714 struct io_connect *conn = &req->connect;
4715
4716 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4717}
4718
Jens Axboe3529d8c2019-12-19 18:24:38 -07004719static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004720{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004721 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004722
Jens Axboe14587a462020-09-05 11:36:08 -06004723 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004724 return -EINVAL;
4725 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4726 return -EINVAL;
4727
Jens Axboe3529d8c2019-12-19 18:24:38 -07004728 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4729 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004730 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004731}
4732
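/*
 * Issue an IORING_OP_CONNECT. The kernel copy of the address lives in
 * ->async_data if the request was prepared for async execution; if the
 * connect would block (-EAGAIN/-EINPROGRESS), the address is stashed there
 * so a retry doesn't have to re-read userspace memory.
 */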
Pavel Begunkov889fca72021-02-10 00:03:09 +00004733static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004734{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004735 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004736 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004737 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004738 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004739
Jens Axboee8c2bc12020-08-15 18:44:09 -07004740 if (req->async_data) {
4741 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004742 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004743 ret = move_addr_to_kernel(req->connect.addr,
4744 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004745 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004746 if (ret)
4747 goto out;
4748 io = &__io;
4749 }
4750
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004751 file_flags = force_nonblock ? O_NONBLOCK : 0;
4752
Jens Axboee8c2bc12020-08-15 18:44:09 -07004753 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004754 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004755 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004756 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004757 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004758 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004759 ret = -ENOMEM;
4760 goto out;
4761 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004762 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004763 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004764 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004765 if (ret == -ERESTARTSYS)
4766 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004767out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004768 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01004769 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004770 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004771 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004772}
YueHaibing469956e2020-03-04 15:53:52 +08004773#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004774#define IO_NETOP_FN(op) \
4775static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4776{ \
4777 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004778}
4779
Jens Axboe99a10082021-02-19 09:35:19 -07004780#define IO_NETOP_PREP(op) \
4781IO_NETOP_FN(op) \
4782static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4783{ \
4784 return -EOPNOTSUPP; \
4785} \
4786
4787#define IO_NETOP_PREP_ASYNC(op) \
4788IO_NETOP_PREP(op) \
4789static int io_##op##_prep_async(struct io_kiocb *req) \
4790{ \
4791 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004792}
4793
Jens Axboe99a10082021-02-19 09:35:19 -07004794IO_NETOP_PREP_ASYNC(sendmsg);
4795IO_NETOP_PREP_ASYNC(recvmsg);
4796IO_NETOP_PREP_ASYNC(connect);
4797IO_NETOP_PREP(accept);
4798IO_NETOP_FN(send);
4799IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004800#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004801
Jens Axboed7718a92020-02-14 22:23:12 -07004802struct io_poll_table {
4803 struct poll_table_struct pt;
4804 struct io_kiocb *req;
4805 int error;
4806};
4807
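/*
 * Common waitqueue wakeup path for both plain poll requests and internal
 * async poll: check the event mask, detach from the waitqueue, and queue
 * task_work to complete (or resubmit) the request in task context.
 */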
Jens Axboed7718a92020-02-14 22:23:12 -07004808static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004809 __poll_t mask, io_req_tw_func_t func)
Jens Axboed7718a92020-02-14 22:23:12 -07004810{
Jens Axboed7718a92020-02-14 22:23:12 -07004811 /* for instances that support it check for an event match first: */
4812 if (mask && !(mask & poll->events))
4813 return 0;
4814
4815 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4816
4817 list_del_init(&poll->wait.entry);
4818
Jens Axboed7718a92020-02-14 22:23:12 -07004819 req->result = mask;
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004820 req->io_task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004821
Jens Axboed7718a92020-02-14 22:23:12 -07004822 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004823 * If this fails, then the task is exiting. When a task exits, the
4824 * work gets canceled, so just cancel this request as well instead
4825 * of executing it. We can't safely execute it anyway, as we may not
 4826	 * have the state needed to execute it anyway.
Jens Axboed7718a92020-02-14 22:23:12 -07004827 */
Pavel Begunkove09ee512021-07-01 13:26:05 +01004828 io_req_task_work_add(req);
Jens Axboed7718a92020-02-14 22:23:12 -07004829 return 1;
4830}
4831
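/*
 * Re-check a poll request from task_work when no result has been recorded
 * yet: re-poll the file and, if there is still nothing to report and the
 * poll wasn't canceled, re-add the waitqueue entry. Returns with
 * ->completion_lock held; true means the request went back to waiting.
 */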
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004832static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4833 __acquires(&req->ctx->completion_lock)
4834{
4835 struct io_ring_ctx *ctx = req->ctx;
4836
Pavel Begunkove09ee512021-07-01 13:26:05 +01004837 if (unlikely(req->task->flags & PF_EXITING))
4838 WRITE_ONCE(poll->canceled, true);
4839
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004840 if (!req->result && !READ_ONCE(poll->canceled)) {
4841 struct poll_table_struct pt = { ._key = poll->events };
4842
4843 req->result = vfs_poll(req->file, &pt) & poll->events;
4844 }
4845
4846 spin_lock_irq(&ctx->completion_lock);
4847 if (!req->result && !READ_ONCE(poll->canceled)) {
4848 add_wait_queue(poll->head, &poll->wait);
4849 return true;
4850 }
4851
4852 return false;
4853}
4854
Jens Axboed4e7cd32020-08-15 11:44:50 -07004855static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004856{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004857 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004858 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004859 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004860 return req->apoll->double_poll;
4861}
4862
4863static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4864{
4865 if (req->opcode == IORING_OP_POLL_ADD)
4866 return &req->poll;
4867 return &req->apoll->poll;
4868}
4869
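/*
 * Detach and drop the second (double) poll entry, if one was installed
 * because the file polls on multiple waitqueues.
 */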
4870static void io_poll_remove_double(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004871 __must_hold(&req->ctx->completion_lock)
Jens Axboed4e7cd32020-08-15 11:44:50 -07004872{
4873 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004874
4875 lockdep_assert_held(&req->ctx->completion_lock);
4876
4877 if (poll && poll->head) {
4878 struct wait_queue_head *head = poll->head;
4879
4880 spin_lock(&head->lock);
4881 list_del_init(&poll->wait.entry);
4882 if (poll->wait.private)
Jens Axboede9b4cc2021-02-24 13:28:27 -07004883 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004884 poll->head = NULL;
4885 spin_unlock(&head->lock);
4886 }
4887}
4888
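/*
 * Post a completion for a poll event. Multishot polls keep
 * IORING_CQE_F_MORE set and stay armed; returns true if this was the
 * final completion and the request should be torn down.
 */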
Pavel Begunkove27414b2021-04-09 09:13:20 +01004889static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004890 __must_hold(&req->ctx->completion_lock)
Jens Axboe18bceab2020-05-15 11:56:54 -06004891{
4892 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004893 unsigned flags = IORING_CQE_F_MORE;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004894 int error;
Jens Axboe18bceab2020-05-15 11:56:54 -06004895
Pavel Begunkove27414b2021-04-09 09:13:20 +01004896 if (READ_ONCE(req->poll.canceled)) {
Jens Axboe45ab03b2021-02-23 08:19:33 -07004897 error = -ECANCELED;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004898 req->poll.events |= EPOLLONESHOT;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004899 } else {
Jens Axboe50826202021-02-23 09:02:26 -07004900 error = mangle_poll(mask);
Pavel Begunkove27414b2021-04-09 09:13:20 +01004901 }
Jens Axboeb69de282021-03-17 08:37:41 -06004902 if (req->poll.events & EPOLLONESHOT)
4903 flags = 0;
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01004904 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
Jens Axboe50826202021-02-23 09:02:26 -07004905 io_poll_remove_waitqs(req);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004906 req->poll.done = true;
4907 flags = 0;
4908 }
Hao Xu7b289c32021-04-13 15:20:39 +08004909 if (flags & IORING_CQE_F_MORE)
4910 ctx->cq_extra++;
4911
Jens Axboe18bceab2020-05-15 11:56:54 -06004912 io_commit_cqring(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004913 return !(flags & IORING_CQE_F_MORE);
Jens Axboe18bceab2020-05-15 11:56:54 -06004914}
4915
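/*
 * task_work completion handler for plain IORING_OP_POLL_ADD requests.
 */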
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004916static void io_poll_task_func(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004917{
Jens Axboe6d816e02020-08-11 08:04:14 -06004918 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004919 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004920
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004921 if (io_poll_rewait(req, &req->poll)) {
4922 spin_unlock_irq(&ctx->completion_lock);
4923 } else {
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004924 bool done;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004925
Pavel Begunkove27414b2021-04-09 09:13:20 +01004926 done = io_poll_complete(req, req->result);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004927 if (done) {
4928 hash_del(&req->hash_node);
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004929 } else {
Jens Axboe88e41cf2021-02-22 22:08:01 -07004930 req->result = 0;
4931 add_wait_queue(req->poll.head, &req->poll.wait);
4932 }
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004933 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004934 io_cqring_ev_posted(ctx);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004935
Jens Axboe88e41cf2021-02-22 22:08:01 -07004936 if (done) {
4937 nxt = io_put_req_find_next(req);
4938 if (nxt)
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01004939 io_req_task_submit(nxt);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004940 }
Pavel Begunkovea1164e2020-06-30 15:20:41 +03004941 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004942}
4943
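/*
 * Wakeup handler for the second poll entry of a file that uses multiple
 * waitqueues: forward the event to the primary entry's handler and drop
 * the extra request reference taken when the double entry was installed.
 */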
4944static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4945 int sync, void *key)
4946{
4947 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004948 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004949 __poll_t mask = key_to_poll(key);
4950
4951 /* for instances that support it check for an event match first: */
4952 if (mask && !(mask & poll->events))
4953 return 0;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004954 if (!(poll->events & EPOLLONESHOT))
4955 return poll->wait.func(&poll->wait, mode, sync, key);
Jens Axboe18bceab2020-05-15 11:56:54 -06004956
Jens Axboe8706e042020-09-28 08:38:54 -06004957 list_del_init(&wait->entry);
4958
Jens Axboe9ce85ef2021-07-09 08:20:28 -06004959 if (poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004960 bool done;
4961
Jens Axboe807abcb2020-07-17 17:09:27 -06004962 spin_lock(&poll->head->lock);
4963 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004964 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004965 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004966 /* make sure double remove sees this as being gone */
4967 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06004968 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06004969 if (!done) {
 4970			/* use the wait func handler, so it matches the request type */
4971 poll->wait.func(&poll->wait, mode, sync, key);
4972 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004973 }
Jens Axboede9b4cc2021-02-24 13:28:27 -07004974 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004975 return 1;
4976}
4977
4978static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4979 wait_queue_func_t wake_func)
4980{
4981 poll->head = NULL;
4982 poll->done = false;
4983 poll->canceled = false;
Jens Axboe464dca62021-03-19 14:06:24 -06004984#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
4985 /* mask in events that we always want/need */
4986 poll->events = events | IO_POLL_UNMASK;
Jens Axboe18bceab2020-05-15 11:56:54 -06004987 INIT_LIST_HEAD(&poll->wait.entry);
4988 init_waitqueue_func_entry(&poll->wait, wake_func);
4989}
4990
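/*
 * poll_table callback: hook the request's io_poll_iocb into the waitqueue
 * handed to us by vfs_poll(). Files that poll on more than one waitqueue
 * get a second, dynamically allocated entry via *poll_ptr.
 */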
4991static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06004992 struct wait_queue_head *head,
4993 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06004994{
4995 struct io_kiocb *req = pt->req;
4996
4997 /*
4998 * If poll->head is already set, it's because the file being polled
 4999	 * uses multiple waitqueues for poll handling (e.g. one for read, one
 5000	 * for write). Set up a separate io_poll_iocb if this happens.
5001 */
5002 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01005003 struct io_poll_iocb *poll_one = poll;
5004
Jens Axboe18bceab2020-05-15 11:56:54 -06005005 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06005006 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06005007 pt->error = -EINVAL;
5008 return;
5009 }
Jens Axboeea6a693d2021-04-15 09:47:13 -06005010 /*
5011 * Can't handle multishot for double wait for now, turn it
5012 * into one-shot mode.
5013 */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005014 if (!(poll_one->events & EPOLLONESHOT))
5015 poll_one->events |= EPOLLONESHOT;
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005016 /* double add on the same waitqueue head, ignore */
Pavel Begunkov7a274722021-05-17 12:43:34 +01005017 if (poll_one->head == head)
Jens Axboe1c3b3e62021-02-28 16:07:30 -07005018 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005019 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5020 if (!poll) {
5021 pt->error = -ENOMEM;
5022 return;
5023 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005024 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboede9b4cc2021-02-24 13:28:27 -07005025 req_ref_get(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005026 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005027 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005028 }
5029
5030 pt->error = 0;
5031 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005032
5033 if (poll->events & EPOLLEXCLUSIVE)
5034 add_wait_queue_exclusive(head, &poll->wait);
5035 else
5036 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005037}
5038
5039static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5040 struct poll_table_struct *p)
5041{
5042 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005043 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005044
Jens Axboe807abcb2020-07-17 17:09:27 -06005045 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005046}
5047
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01005048static void io_async_task_func(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005049{
Jens Axboed7718a92020-02-14 22:23:12 -07005050 struct async_poll *apoll = req->apoll;
5051 struct io_ring_ctx *ctx = req->ctx;
5052
Olivier Langlois236daeae2021-05-31 02:36:37 -04005053 trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
Jens Axboed7718a92020-02-14 22:23:12 -07005054
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005055 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005056 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005057 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005058 }
5059
Pavel Begunkov0ea13b42021-04-09 09:13:21 +01005060 hash_del(&req->hash_node);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005061 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005062 spin_unlock_irq(&ctx->completion_lock);
5063
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005064 if (!READ_ONCE(apoll->poll.canceled))
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01005065 io_req_task_submit(req);
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005066 else
Pavel Begunkov25935532021-03-19 17:22:40 +00005067 io_req_complete_failed(req, -ECANCELED);
Jens Axboed7718a92020-02-14 22:23:12 -07005068}
5069
5070static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5071 void *key)
5072{
5073 struct io_kiocb *req = wait->private;
5074 struct io_poll_iocb *poll = &req->apoll->poll;
5075
5076 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5077 key_to_poll(key));
5078
5079 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5080}
5081
5082static void io_poll_req_insert(struct io_kiocb *req)
5083{
5084 struct io_ring_ctx *ctx = req->ctx;
5085 struct hlist_head *list;
5086
5087 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5088 hlist_add_head(&req->hash_node, list);
5089}
5090
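/*
 * Arm a poll handler for the request: initialise the io_poll_iocb, run
 * vfs_poll() with our queueing proc, and hash the request for cancelation
 * if it ends up waiting. Returns the ready mask (if any) with
 * ->completion_lock held.
 */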
5091static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5092 struct io_poll_iocb *poll,
5093 struct io_poll_table *ipt, __poll_t mask,
5094 wait_queue_func_t wake_func)
5095 __acquires(&ctx->completion_lock)
5096{
5097 struct io_ring_ctx *ctx = req->ctx;
5098 bool cancel = false;
5099
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005100 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005101 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005102 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005103 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005104
5105 ipt->pt._key = mask;
5106 ipt->req = req;
5107 ipt->error = -EINVAL;
5108
Jens Axboed7718a92020-02-14 22:23:12 -07005109 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5110
5111 spin_lock_irq(&ctx->completion_lock);
5112 if (likely(poll->head)) {
5113 spin_lock(&poll->head->lock);
5114 if (unlikely(list_empty(&poll->wait.entry))) {
5115 if (ipt->error)
5116 cancel = true;
5117 ipt->error = 0;
5118 mask = 0;
5119 }
Jens Axboe88e41cf2021-02-22 22:08:01 -07005120 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
Jens Axboed7718a92020-02-14 22:23:12 -07005121 list_del_init(&poll->wait.entry);
5122 else if (cancel)
5123 WRITE_ONCE(poll->canceled, true);
5124 else if (!poll->done) /* actually waiting for an event */
5125 io_poll_req_insert(req);
5126 spin_unlock(&poll->head->lock);
5127 }
5128
5129 return mask;
5130}
5131
Olivier Langlois59b735a2021-06-22 05:17:39 -07005132enum {
5133 IO_APOLL_OK,
5134 IO_APOLL_ABORTED,
5135 IO_APOLL_READY
5136};
5137
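/*
 * Try to arm poll-driven retry for a request that would otherwise have to
 * be punted to io-wq: returns IO_APOLL_OK if poll was armed,
 * IO_APOLL_READY if the file was already ready (retry inline), or
 * IO_APOLL_ABORTED if poll can't be used for this request.
 */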
5138static int io_arm_poll_handler(struct io_kiocb *req)
Jens Axboed7718a92020-02-14 22:23:12 -07005139{
5140 const struct io_op_def *def = &io_op_defs[req->opcode];
5141 struct io_ring_ctx *ctx = req->ctx;
5142 struct async_poll *apoll;
5143 struct io_poll_table ipt;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005144 __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005145 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005146
5147 if (!req->file || !file_can_poll(req->file))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005148 return IO_APOLL_ABORTED;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005149 if (req->flags & REQ_F_POLLED)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005150 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005151 if (!def->pollin && !def->pollout)
Olivier Langlois59b735a2021-06-22 05:17:39 -07005152 return IO_APOLL_ABORTED;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005153
5154 if (def->pollin) {
5155 rw = READ;
5156 mask |= POLLIN | POLLRDNORM;
5157
5158 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5159 if ((req->opcode == IORING_OP_RECVMSG) &&
5160 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5161 mask &= ~POLLIN;
5162 } else {
5163 rw = WRITE;
5164 mask |= POLLOUT | POLLWRNORM;
5165 }
5166
Jens Axboe9dab14b2020-08-25 12:27:50 -06005167	/* if we can't do a nonblocking try, there's no point in arming a poll handler */
Jens Axboe7b29f922021-03-12 08:30:14 -07005168 if (!io_file_supports_async(req, rw))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005169 return IO_APOLL_ABORTED;
Jens Axboed7718a92020-02-14 22:23:12 -07005170
5171 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5172 if (unlikely(!apoll))
Olivier Langlois59b735a2021-06-22 05:17:39 -07005173 return IO_APOLL_ABORTED;
Jens Axboe807abcb2020-07-17 17:09:27 -06005174 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005175 req->apoll = apoll;
Pavel Begunkovb2d9c3d2021-06-26 21:40:44 +01005176 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005177 ipt.pt._qproc = io_async_queue_proc;
5178
5179 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5180 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005181 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005182 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005183 spin_unlock_irq(&ctx->completion_lock);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005184 if (ret)
5185 return IO_APOLL_READY;
5186 return IO_APOLL_ABORTED;
Jens Axboed7718a92020-02-14 22:23:12 -07005187 }
5188 spin_unlock_irq(&ctx->completion_lock);
Olivier Langlois236daeae2021-05-31 02:36:37 -04005189 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
5190 mask, apoll->poll.events);
Olivier Langlois59b735a2021-06-22 05:17:39 -07005191 return IO_APOLL_OK;
Jens Axboed7718a92020-02-14 22:23:12 -07005192}
5193
5194static bool __io_poll_remove_one(struct io_kiocb *req,
Jens Axboeb2e720a2021-03-31 09:03:03 -06005195 struct io_poll_iocb *poll, bool do_cancel)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005196 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005197{
Jens Axboeb41e9852020-02-17 09:52:41 -07005198 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005199
Jens Axboe50826202021-02-23 09:02:26 -07005200 if (!poll->head)
5201 return false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005202 spin_lock(&poll->head->lock);
Jens Axboeb2e720a2021-03-31 09:03:03 -06005203 if (do_cancel)
5204 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005205 if (!list_empty(&poll->wait.entry)) {
5206 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005207 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005208 }
5209 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005210 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005211 return do_complete;
5212}
5213
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005214static bool io_poll_remove_waitqs(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005215 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005216{
5217 bool do_complete;
5218
Jens Axboed4e7cd32020-08-15 11:44:50 -07005219 io_poll_remove_double(req);
Pavel Begunkove31001a2021-04-13 02:58:43 +01005220 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005221
Pavel Begunkove31001a2021-04-13 02:58:43 +01005222 if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005223 /* non-poll requests have submit ref still */
Pavel Begunkove31001a2021-04-13 02:58:43 +01005224 req_ref_put(req);
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005225 }
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005226 return do_complete;
5227}
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005228
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005229static bool io_poll_remove_one(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005230 __must_hold(&req->ctx->completion_lock)
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005231{
5232 bool do_complete;
5233
5234 do_complete = io_poll_remove_waitqs(req);
Jens Axboeb41e9852020-02-17 09:52:41 -07005235 if (do_complete) {
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005236 io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
Jens Axboeb41e9852020-02-17 09:52:41 -07005237 io_commit_cqring(req->ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005238 req_set_fail(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005239 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005240 }
5241
5242 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005243}
5244
Jens Axboe76e1b642020-09-26 15:05:03 -06005245/*
5246 * Returns true if we found and killed one or more poll requests
5247 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005248static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005249 bool cancel_all)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005250{
Jens Axboe78076bb2019-12-04 19:56:40 -07005251 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005252 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005253 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005254
5255 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005256 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5257 struct hlist_head *list;
5258
5259 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005260 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01005261 if (io_match_task(req, tsk, cancel_all))
Jens Axboef3606e32020-09-22 08:18:24 -06005262 posted += io_poll_remove_one(req);
5263 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005264 }
5265 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005266
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005267 if (posted)
5268 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005269
5270 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005271}
5272
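/*
 * Look up a pending poll request by user_data in the cancel hash;
 * poll_only restricts the match to IORING_OP_POLL_ADD requests.
 */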
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005273static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5274 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005275 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005276{
Jens Axboe78076bb2019-12-04 19:56:40 -07005277 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005278 struct io_kiocb *req;
5279
Jens Axboe78076bb2019-12-04 19:56:40 -07005280 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5281 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005282 if (sqe_addr != req->user_data)
5283 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005284 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5285 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005286 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005287 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005288 return NULL;
Jens Axboe47f46762019-11-09 17:43:02 -07005289}
5290
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005291static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5292 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005293 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005294{
5295 struct io_kiocb *req;
5296
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005297 req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005298 if (!req)
5299 return -ENOENT;
5300 if (io_poll_remove_one(req))
5301 return 0;
5302
5303 return -EALREADY;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005304}
5305
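/*
 * Convert the userspace poll32_events field into internal EPOLL* bits,
 * fixing up endianness and forcing EPOLLONESHOT unless multishot was
 * requested.
 */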
Pavel Begunkov9096af32021-04-14 13:38:36 +01005306static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5307 unsigned int flags)
5308{
5309 u32 events;
5310
5311 events = READ_ONCE(sqe->poll32_events);
5312#ifdef __BIG_ENDIAN
5313 events = swahw32(events);
5314#endif
5315 if (!(flags & IORING_POLL_ADD_MULTI))
5316 events |= EPOLLONESHOT;
5317 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5318}
5319
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005320static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005321 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005322{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005323 struct io_poll_update *upd = &req->poll_update;
5324 u32 flags;
5325
Jens Axboe221c5eb2019-01-17 09:41:58 -07005326 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5327 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005328 if (sqe->ioprio || sqe->buf_index)
5329 return -EINVAL;
5330 flags = READ_ONCE(sqe->len);
5331 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5332 IORING_POLL_ADD_MULTI))
5333 return -EINVAL;
5334 /* meaningless without update */
5335 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005336 return -EINVAL;
5337
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005338 upd->old_user_data = READ_ONCE(sqe->addr);
5339 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5340 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005341
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005342 upd->new_user_data = READ_ONCE(sqe->off);
5343 if (!upd->update_user_data && upd->new_user_data)
5344 return -EINVAL;
5345 if (upd->update_events)
5346 upd->events = io_poll_parse_events(sqe, flags);
5347 else if (sqe->poll32_events)
5348 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005349
Jens Axboe221c5eb2019-01-17 09:41:58 -07005350 return 0;
5351}
5352
Jens Axboe221c5eb2019-01-17 09:41:58 -07005353static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5354 void *key)
5355{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005356 struct io_kiocb *req = wait->private;
5357 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005358
Jens Axboed7718a92020-02-14 22:23:12 -07005359 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005360}
5361
Jens Axboe221c5eb2019-01-17 09:41:58 -07005362static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5363 struct poll_table_struct *p)
5364{
5365 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5366
Jens Axboee8c2bc12020-08-15 18:44:09 -07005367 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005368}
5369
Jens Axboe3529d8c2019-12-19 18:24:38 -07005370static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005371{
5372 struct io_poll_iocb *poll = &req->poll;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005373 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005374
5375 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5376 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005377 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005378 return -EINVAL;
5379 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005380 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005381 return -EINVAL;
5382
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005383 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005384 return 0;
5385}
5386
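/*
 * Issue IORING_OP_POLL_ADD: arm the poll handler, and if the file is
 * already ready, complete the request inline instead of waiting.
 */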
Pavel Begunkov61e98202021-02-10 00:03:08 +00005387static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005388{
5389 struct io_poll_iocb *poll = &req->poll;
5390 struct io_ring_ctx *ctx = req->ctx;
5391 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005392 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005393
Jens Axboed7718a92020-02-14 22:23:12 -07005394 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005395
Jens Axboed7718a92020-02-14 22:23:12 -07005396 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5397 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005398
Jens Axboe8c838782019-03-12 15:48:16 -06005399 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005400 ipt.error = 0;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005401 io_poll_complete(req, mask);
Jens Axboe8c838782019-03-12 15:48:16 -06005402 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005403 spin_unlock_irq(&ctx->completion_lock);
5404
Jens Axboe8c838782019-03-12 15:48:16 -06005405 if (mask) {
5406 io_cqring_ev_posted(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005407 if (poll->events & EPOLLONESHOT)
5408 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005409 }
Jens Axboe8c838782019-03-12 15:48:16 -06005410 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005411}
5412
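/*
 * Issue the update form of IORING_OP_POLL_REMOVE: find the original poll
 * request, either cancel it outright or detach it, patch its events
 * and/or user_data, and re-arm it.
 */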
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005413static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06005414{
5415 struct io_ring_ctx *ctx = req->ctx;
5416 struct io_kiocb *preq;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005417 bool completing;
Jens Axboeb69de282021-03-17 08:37:41 -06005418 int ret;
5419
5420 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005421 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Jens Axboeb69de282021-03-17 08:37:41 -06005422 if (!preq) {
5423 ret = -ENOENT;
5424 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005425 }
Jens Axboecb3b200e2021-04-06 09:49:31 -06005426
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005427 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5428 completing = true;
5429 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5430 goto err;
5431 }
5432
Jens Axboecb3b200e2021-04-06 09:49:31 -06005433 /*
5434 * Don't allow racy completion with singleshot, as we cannot safely
5435 * update those. For multishot, if we're racing with completion, just
5436 * let completion re-add it.
5437 */
5438 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5439 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5440 ret = -EALREADY;
5441 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005442 }
5443 /* we now have a detached poll request. reissue. */
5444 ret = 0;
5445err:
Jens Axboeb69de282021-03-17 08:37:41 -06005446 if (ret < 0) {
Jens Axboecb3b200e2021-04-06 09:49:31 -06005447 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005448 req_set_fail(req);
Jens Axboeb69de282021-03-17 08:37:41 -06005449 io_req_complete(req, ret);
5450 return 0;
5451 }
 5452		/* only replace the low event mask bits, keep the behavior flags */
Pavel Begunkov9d805892021-04-13 02:58:40 +01005453 if (req->poll_update.update_events) {
Jens Axboeb69de282021-03-17 08:37:41 -06005454 preq->poll.events &= ~0xffff;
Pavel Begunkov9d805892021-04-13 02:58:40 +01005455 preq->poll.events |= req->poll_update.events & 0xffff;
Jens Axboeb69de282021-03-17 08:37:41 -06005456 preq->poll.events |= IO_POLL_UNMASK;
5457 }
Pavel Begunkov9d805892021-04-13 02:58:40 +01005458 if (req->poll_update.update_user_data)
5459 preq->user_data = req->poll_update.new_user_data;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005460 spin_unlock_irq(&ctx->completion_lock);
5461
Jens Axboeb69de282021-03-17 08:37:41 -06005462 /* complete update request, we're done with it */
5463 io_req_complete(req, ret);
5464
Jens Axboecb3b200e2021-04-06 09:49:31 -06005465 if (!completing) {
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005466 ret = io_poll_add(preq, issue_flags);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005467 if (ret < 0) {
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005468 req_set_fail(preq);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005469 io_req_complete(preq, ret);
5470 }
Jens Axboeb69de282021-03-17 08:37:41 -06005471 }
5472 return 0;
5473}
5474
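/*
 * hrtimer callback for IORING_OP_TIMEOUT: drop the timeout from the list
 * and post a -ETIME completion.
 */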
Jens Axboe5262f562019-09-17 12:26:57 -06005475static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5476{
Jens Axboead8a48a2019-11-15 08:49:11 -07005477 struct io_timeout_data *data = container_of(timer,
5478 struct io_timeout_data, timer);
5479 struct io_kiocb *req = data->req;
5480 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005481 unsigned long flags;
5482
Jens Axboe5262f562019-09-17 12:26:57 -06005483 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005484 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005485 atomic_set(&req->ctx->cq_timeouts,
5486 atomic_read(&req->ctx->cq_timeouts) + 1);
5487
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005488 io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
Jens Axboe5262f562019-09-17 12:26:57 -06005489 io_commit_cqring(ctx);
5490 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5491
5492 io_cqring_ev_posted(ctx);
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005493 req_set_fail(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005494 io_put_req(req);
5495 return HRTIMER_NORESTART;
5496}
5497
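/*
 * Find a pending timeout by user_data and try to stop its hrtimer; on
 * success the request is unlinked from ->timeout_list and returned.
 */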
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005498static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5499 __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005500 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005501{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005502 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005503 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005504 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06005505
5506 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005507 found = user_data == req->user_data;
5508 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06005509 break;
Jens Axboef254ac02020-08-12 17:33:30 -06005510 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005511 if (!found)
5512 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06005513
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005514 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005515 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005516 return ERR_PTR(-EALREADY);
5517 list_del_init(&req->timeout.list);
5518 return req;
5519}
5520
5521static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005522 __must_hold(&ctx->completion_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005523{
5524 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5525
5526 if (IS_ERR(req))
5527 return PTR_ERR(req);
5528
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005529 req_set_fail(req);
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005530 io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005531 io_put_req_deferred(req, 1);
5532 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005533}
5534
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005535static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5536 struct timespec64 *ts, enum hrtimer_mode mode)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005537 __must_hold(&ctx->completion_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005538{
5539 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5540 struct io_timeout_data *data;
5541
5542 if (IS_ERR(req))
5543 return PTR_ERR(req);
5544
5545 req->timeout.off = 0; /* noseq */
5546 data = req->async_data;
5547 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5548 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5549 data->timer.function = io_timeout_fn;
5550 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5551 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005552}
5553
Jens Axboe3529d8c2019-12-19 18:24:38 -07005554static int io_timeout_remove_prep(struct io_kiocb *req,
5555 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005556{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005557 struct io_timeout_rem *tr = &req->timeout_rem;
5558
Jens Axboeb29472e2019-12-17 18:50:29 -07005559 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5560 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005561 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5562 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005563 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005564 return -EINVAL;
5565
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005566 tr->addr = READ_ONCE(sqe->addr);
5567 tr->flags = READ_ONCE(sqe->timeout_flags);
5568 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5569 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5570 return -EINVAL;
5571 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5572 return -EFAULT;
5573 } else if (tr->flags) {
5574 /* timeout removal doesn't support flags */
5575 return -EINVAL;
5576 }
5577
Jens Axboeb29472e2019-12-17 18:50:29 -07005578 return 0;
5579}
5580
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005581static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5582{
5583 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5584 : HRTIMER_MODE_REL;
5585}
5586
Jens Axboe11365042019-10-16 09:08:32 -06005587/*
5588 * Remove or update an existing timeout command
5589 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005590static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005591{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005592 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005593 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005594 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005595
Jens Axboe11365042019-10-16 09:08:32 -06005596 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005597 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005598 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005599 else
5600 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5601 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005602
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005603 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06005604 io_commit_cqring(ctx);
5605 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005606 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005607 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005608 req_set_fail(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005609 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005610 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005611}
5612
Jens Axboe3529d8c2019-12-19 18:24:38 -07005613static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005614 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005615{
Jens Axboead8a48a2019-11-15 08:49:11 -07005616 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005617 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005618 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005619
Jens Axboead8a48a2019-11-15 08:49:11 -07005620 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005621 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005622 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005623 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005624 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005625 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005626 flags = READ_ONCE(sqe->timeout_flags);
5627 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005628 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005629
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005630 req->timeout.off = off;
Pavel Begunkovf18ee4c2021-06-14 23:37:25 +01005631 if (unlikely(off && !req->ctx->off_timeout_used))
5632 req->ctx->off_timeout_used = true;
Jens Axboe26a61672019-12-20 09:02:01 -07005633
Jens Axboee8c2bc12020-08-15 18:44:09 -07005634 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005635 return -ENOMEM;
5636
Jens Axboee8c2bc12020-08-15 18:44:09 -07005637 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005638 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005639
5640 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005641 return -EFAULT;
5642
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005643 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005644 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkov2482b582021-03-25 18:32:44 +00005645 if (is_timeout_link)
5646 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005647 return 0;
5648}
5649
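/*
 * Issue IORING_OP_TIMEOUT: compute the target completion sequence and
 * insert the request into the sorted timeout list before starting the
 * hrtimer.
 */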
Pavel Begunkov61e98202021-02-10 00:03:08 +00005650static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005651{
Jens Axboead8a48a2019-11-15 08:49:11 -07005652 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005653 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005654 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005655 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005656
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005657 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005658
Jens Axboe5262f562019-09-17 12:26:57 -06005659 /*
 5660	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005661	 * timeout event to be satisfied. If it isn't set, then this is
 5662	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005663 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005664 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005665 entry = ctx->timeout_list.prev;
5666 goto add;
5667 }
Jens Axboe5262f562019-09-17 12:26:57 -06005668
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005669 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5670 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005671
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005672 /* Update the last seq here in case io_flush_timeouts() hasn't.
5673 * This is safe because ->completion_lock is held, and submissions
5674 * and completions are never mixed in the same ->completion_lock section.
5675 */
5676 ctx->cq_last_tm_flush = tail;
5677
Jens Axboe5262f562019-09-17 12:26:57 -06005678 /*
5679 * Insertion sort, ensuring the first entry in the list is always
5680 * the one we need first.
5681 */
Jens Axboe5262f562019-09-17 12:26:57 -06005682 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005683 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5684 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005685
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005686 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005687 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005688 /* nxt.seq is behind @tail, otherwise would've been completed */
5689 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005690 break;
5691 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005692add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005693 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005694 data->timer.function = io_timeout_fn;
5695 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005696 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005697 return 0;
5698}
5699
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005700struct io_cancel_data {
5701 struct io_ring_ctx *ctx;
5702 u64 user_data;
5703};
5704
Jens Axboe62755e32019-10-28 21:49:21 -06005705static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005706{
Jens Axboe62755e32019-10-28 21:49:21 -06005707 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005708 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005709
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005710 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005711}
5712
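/*
 * Ask the task's io-wq to cancel a single request matching user_data;
 * translates the io-wq cancel result into 0, -EALREADY or -ENOENT.
 */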
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005713static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5714 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005715{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005716 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005717 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005718 int ret = 0;
5719
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005720 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005721 return -ENOENT;
5722
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005723 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005724 switch (cancel_ret) {
5725 case IO_WQ_CANCEL_OK:
5726 ret = 0;
5727 break;
5728 case IO_WQ_CANCEL_RUNNING:
5729 ret = -EALREADY;
5730 break;
5731 case IO_WQ_CANCEL_NOTFOUND:
5732 ret = -ENOENT;
5733 break;
5734 }
5735
Jens Axboee977d6d2019-11-05 12:39:45 -07005736 return ret;
5737}
5738
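/*
 * Cancellation cascade: try the submitting task's io-wq first, then pending
 * timeouts, then poll requests. The result (or success_ret on success) is
 * posted as a CQE for @req.
 */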
Jens Axboe47f46762019-11-09 17:43:02 -07005739static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5740 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005741 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005742{
5743 unsigned long flags;
5744 int ret;
5745
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005746 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005747 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01005748 if (ret != -ENOENT)
5749 goto done;
Jens Axboe47f46762019-11-09 17:43:02 -07005750 ret = io_timeout_cancel(ctx, sqe_addr);
5751 if (ret != -ENOENT)
5752 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005753 ret = io_poll_cancel(ctx, sqe_addr, false);
Jens Axboe47f46762019-11-09 17:43:02 -07005754done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005755 if (!ret)
5756 ret = success_ret;
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005757 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Jens Axboe47f46762019-11-09 17:43:02 -07005758 io_commit_cqring(ctx);
5759 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5760 io_cqring_ev_posted(ctx);
5761
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005762 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005763 req_set_fail(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005764}
5765
Jens Axboe3529d8c2019-12-19 18:24:38 -07005766static int io_async_cancel_prep(struct io_kiocb *req,
5767 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005768{
Jens Axboefbf23842019-12-17 18:45:56 -07005769 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005770 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005771 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5772 return -EINVAL;
5773 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005774 return -EINVAL;
5775
Jens Axboefbf23842019-12-17 18:45:56 -07005776 req->cancel.addr = READ_ONCE(sqe->addr);
5777 return 0;
5778}
5779
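/*
 * IORING_OP_ASYNC_CANCEL: look for the target in the submitter's own io-wq,
 * timeouts and poll requests first; as a slow path, walk every task attached
 * to this ring and probe its io-wq as well.
 */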
Pavel Begunkov61e98202021-02-10 00:03:08 +00005780static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005781{
5782 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005783 u64 sqe_addr = req->cancel.addr;
5784 struct io_tctx_node *node;
5785 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005786
Pavel Begunkov58f99372021-03-12 16:25:55 +00005787 /* tasks should wait for their io-wq threads, so safe w/o sync */
5788 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5789 spin_lock_irq(&ctx->completion_lock);
5790 if (ret != -ENOENT)
5791 goto done;
5792 ret = io_timeout_cancel(ctx, sqe_addr);
5793 if (ret != -ENOENT)
5794 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005795 ret = io_poll_cancel(ctx, sqe_addr, false);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005796 if (ret != -ENOENT)
5797 goto done;
5798 spin_unlock_irq(&ctx->completion_lock);
5799
5800 /* slow path, try all io-wq's */
5801 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5802 ret = -ENOENT;
5803 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5804 struct io_uring_task *tctx = node->task->io_uring;
5805
Pavel Begunkov58f99372021-03-12 16:25:55 +00005806 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5807 if (ret != -ENOENT)
5808 break;
5809 }
5810 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5811
5812 spin_lock_irq(&ctx->completion_lock);
5813done:
Pavel Begunkovd4d19c12021-04-25 14:32:17 +01005814 io_cqring_fill_event(ctx, req->user_data, ret, 0);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005815 io_commit_cqring(ctx);
5816 spin_unlock_irq(&ctx->completion_lock);
5817 io_cqring_ev_posted(ctx);
5818
5819 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005820 req_set_fail(req);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005821 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005822 return 0;
5823}
5824
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005825static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005826 const struct io_uring_sqe *sqe)
5827{
Daniele Albano61710e42020-07-18 14:15:16 -06005828 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5829 return -EINVAL;
5830 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005831 return -EINVAL;
5832
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005833 req->rsrc_update.offset = READ_ONCE(sqe->off);
5834 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5835 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005836 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005837 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005838 return 0;
5839}
5840
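/*
 * IORING_OP_FILES_UPDATE goes through the generic rsrc update path: sqe->off
 * is the offset into the fixed file table, sqe->len the number of entries,
 * and sqe->addr the user array of fds to install.
 */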
Pavel Begunkov889fca72021-02-10 00:03:09 +00005841static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005842{
5843 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01005844 struct io_uring_rsrc_update2 up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005845 int ret;
5846
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005847 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005848 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005849
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005850 up.offset = req->rsrc_update.offset;
5851 up.data = req->rsrc_update.arg;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01005852 up.nr = 0;
5853 up.tags = 0;
Colin Ian King615cee42021-04-26 10:47:35 +01005854 up.resv = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005855
5856 mutex_lock(&ctx->uring_lock);
Pavel Begunkovfdecb662021-04-25 14:32:20 +01005857 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01005858 &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005859 mutex_unlock(&ctx->uring_lock);
5860
5861 if (ret < 0)
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01005862 req_set_fail(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005863 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005864 return 0;
5865}
5866
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005867static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005868{
Jens Axboed625c6e2019-12-17 19:53:05 -07005869 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005870 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005871 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005872 case IORING_OP_READV:
5873 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005874 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005875 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005876 case IORING_OP_WRITEV:
5877 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005878 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005879 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005880 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005881 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005882 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005883 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005884 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005885 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005886 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005887 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005888 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005889 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005890 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005891 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005892 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005893 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005894 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005895 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005896 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005897 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005898 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005899 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005900 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005901 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005902 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005903 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005904 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005905 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005906 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005907 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005908 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005909 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005910 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005911 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005912 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005913 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005914 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005915 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005916 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005917 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005918 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005919 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005920 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005921 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005922 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005923 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005924 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005925 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005926 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005927 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005928 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005929 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005930 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005931 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005932 case IORING_OP_SHUTDOWN:
5933 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005934 case IORING_OP_RENAMEAT:
5935 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005936 case IORING_OP_UNLINKAT:
5937 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005938 }
5939
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005940 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5941 req->opcode);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01005942 return -EINVAL;
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005943}
5944
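/*
 * Allocate ->async_data and copy out sqe state for opcodes that need async
 * setup: readv/writev iovecs, sendmsg/recvmsg headers and connect addresses.
 */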
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005945static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005946{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005947 if (!io_op_defs[req->opcode].needs_async_setup)
5948 return 0;
5949 if (WARN_ON_ONCE(req->async_data))
5950 return -EFAULT;
5951 if (io_alloc_async_data(req))
5952 return -EAGAIN;
5953
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005954 switch (req->opcode) {
5955 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005956 return io_rw_prep_async(req, READ);
5957 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005958 return io_rw_prep_async(req, WRITE);
5959 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005960 return io_sendmsg_prep_async(req);
5961 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005962 return io_recvmsg_prep_async(req);
5963 case IORING_OP_CONNECT:
5964 return io_connect_prep_async(req);
5965 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005966 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
5967 req->opcode);
5968 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07005969}
5970
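/*
 * Recover the submission sequence of @req: cached_sq_head was bumped once for
 * every request in the link, so walk the chain and undo those increments.
 */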
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005971static u32 io_get_sequence(struct io_kiocb *req)
5972{
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01005973 u32 seq = req->ctx->cached_sq_head;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005974
Pavel Begunkova3dbdf52021-06-17 18:14:05 +01005975 /* need original cached_sq_head, but it was increased for each req */
5976 io_for_each_link(req, req)
5977 seq--;
5978 return seq;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005979}
5980
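/*
 * Handle IOSQE_IO_DRAIN. Returns true if the request was consumed here
 * (deferred, punted to async work, or failed) and false if the caller should
 * go ahead and issue it normally.
 */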
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01005981static bool io_drain_req(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005982{
Pavel Begunkov3c199662021-06-15 16:47:57 +01005983 struct io_kiocb *pos;
Jens Axboedef596e2019-01-09 08:59:42 -07005984 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005985 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07005986 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005987 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07005988
Pavel Begunkov3c199662021-06-15 16:47:57 +01005989 /*
5990 * If we need to drain a request in the middle of a link, drain the
5991 * head request and the next request/link after the current link.
5992 * Considering sequential execution of links, IOSQE_IO_DRAIN will be
5993 * maintained for every request of our link.
5994 */
5995 if (ctx->drain_next) {
5996 req->flags |= REQ_F_IO_DRAIN;
5997 ctx->drain_next = false;
5998 }
5999 /* not interested in head, start from the first linked */
6000 io_for_each_link(pos, req->link) {
6001 if (pos->flags & REQ_F_IO_DRAIN) {
6002 ctx->drain_next = true;
6003 req->flags |= REQ_F_IO_DRAIN;
6004 break;
6005 }
6006 }
6007
Jens Axboedef596e2019-01-09 08:59:42 -07006008	/* Still need to defer if there is a pending req in the defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006009 if (likely(list_empty_careful(&ctx->defer_list) &&
Pavel Begunkov10c66902021-06-15 16:47:56 +01006010 !(req->flags & REQ_F_IO_DRAIN))) {
6011 ctx->drain_active = false;
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006012 return false;
Pavel Begunkov10c66902021-06-15 16:47:56 +01006013 }
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006014
6015 seq = io_get_sequence(req);
6016 /* Still a chance to pass the sequence check */
6017 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006018 return false;
Jens Axboedef596e2019-01-09 08:59:42 -07006019
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006020 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006021 if (ret)
6022 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03006023 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006024 de = kmalloc(sizeof(*de), GFP_KERNEL);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006025 if (!de) {
Pavel Begunkovc32aace2021-07-07 19:24:24 +01006026 io_req_complete_failed(req, -ENOMEM);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006027 return true;
6028 }
Jens Axboe31b51512019-01-18 22:56:34 -07006029
6030 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006031 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07006032 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006033 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03006034 io_queue_async_work(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006035 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006036 }
6037
6038 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006039 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03006040 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03006041 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07006042 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006043 return true;
Jens Axboe31b51512019-01-18 22:56:34 -07006044}
6045
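/*
 * Drop whatever the opcode attached to the request: selected buffers, async
 * rw/msg state, splice input files, pathnames from openat/renameat/unlinkat,
 * async poll state, inflight tracking and personality creds.
 */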
Pavel Begunkov68fb8972021-03-19 17:22:41 +00006046static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006047{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006048 if (req->flags & REQ_F_BUFFER_SELECTED) {
6049 switch (req->opcode) {
6050 case IORING_OP_READV:
6051 case IORING_OP_READ_FIXED:
6052 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07006053 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006054 break;
6055 case IORING_OP_RECVMSG:
6056 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006057 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006058 break;
6059 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006060 }
6061
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006062 if (req->flags & REQ_F_NEED_CLEANUP) {
6063 switch (req->opcode) {
6064 case IORING_OP_READV:
6065 case IORING_OP_READ_FIXED:
6066 case IORING_OP_READ:
6067 case IORING_OP_WRITEV:
6068 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006069 case IORING_OP_WRITE: {
6070 struct io_async_rw *io = req->async_data;
Pavel Begunkov1dacb4d2021-06-17 18:14:03 +01006071
6072 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006073 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006074 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006075 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006076 case IORING_OP_SENDMSG: {
6077 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006078
6079 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006080 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006081 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006082 case IORING_OP_SPLICE:
6083 case IORING_OP_TEE:
Pavel Begunkove1d767f2021-03-19 17:22:43 +00006084 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6085 io_put_file(req->splice.file_in);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006086 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006087 case IORING_OP_OPENAT:
6088 case IORING_OP_OPENAT2:
6089 if (req->open.filename)
6090 putname(req->open.filename);
6091 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006092 case IORING_OP_RENAMEAT:
6093 putname(req->rename.oldpath);
6094 putname(req->rename.newpath);
6095 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006096 case IORING_OP_UNLINKAT:
6097 putname(req->unlink.filename);
6098 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006099 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006100 }
Jens Axboe75652a302021-04-15 09:52:40 -06006101 if ((req->flags & REQ_F_POLLED) && req->apoll) {
6102 kfree(req->apoll->double_poll);
6103 kfree(req->apoll);
6104 req->apoll = NULL;
6105 }
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006106 if (req->flags & REQ_F_INFLIGHT) {
6107 struct io_uring_task *tctx = req->task->io_uring;
6108
6109 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov3a0a6902021-04-20 12:03:31 +01006110 }
Pavel Begunkovc8543572021-06-17 18:14:04 +01006111 if (req->flags & REQ_F_CREDS)
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006112 put_cred(req->creds);
Pavel Begunkovc8543572021-06-17 18:14:04 +01006113
6114 req->flags &= ~IO_REQ_CLEAN_FLAGS;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006115}
6116
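/*
 * Core per-opcode dispatch. Personality creds, if any, are switched in for
 * the duration of the issue and restored afterwards; on IOPOLL rings a
 * successfully issued file-backed request is queued for poll reaping.
 */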
Pavel Begunkov889fca72021-02-10 00:03:09 +00006117static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006118{
Jens Axboeedafcce2019-01-09 09:16:05 -07006119 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006120 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006121 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006122
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006123 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006124 creds = override_creds(req->creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006125
Jens Axboed625c6e2019-12-17 19:53:05 -07006126 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006127 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006128 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006129 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006130 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006131 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006132 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006133 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006134 break;
6135 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006136 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006137 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006138 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006139 break;
6140 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006141 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006142 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006143 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006144 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006145 break;
6146 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006147 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006148 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006149 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006150 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006151 break;
6152 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006153 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006154 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006155 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006156 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006157 break;
6158 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006159 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006160 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006161 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006162 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006163 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006164 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006165 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006166 break;
6167 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006168 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006169 break;
6170 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006171 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006172 break;
6173 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006174 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006175 break;
6176 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006177 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006178 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006179 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006180 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006181 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006182 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006183 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006184 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006185 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006186 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006187 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006188 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006189 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006190 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006191 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006192 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006193 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006194 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006195 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006196 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006197 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006198 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006199 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006200 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006201 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006202 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006203 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006204 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006205 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006206 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006207 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006208 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006209 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006210 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006211 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006212 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006213 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006214 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006215 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006216 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006217 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006218 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006219 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006220 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006221 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006222 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006223 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006224 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006225 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006226 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006227 default:
6228 ret = -EINVAL;
6229 break;
6230 }
Jens Axboe31b51512019-01-18 22:56:34 -07006231
Jens Axboe5730b272021-02-27 15:57:30 -07006232 if (creds)
6233 revert_creds(creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006234 if (ret)
6235 return ret;
Jens Axboeb5325762020-05-19 21:20:27 -06006236 /* If the op doesn't have a file, we're not polling for it */
Pavel Begunkovcb3d8972021-06-14 02:36:14 +01006237 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
6238 io_iopoll_req_issued(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006239
6240 return 0;
6241}
6242
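/*
 * io-wq worker entry: arm a prepared linked timeout, then issue the request
 * synchronously, retrying on -EAGAIN since the worker cannot wait for block
 * side request slots. Failures are punted back to task context.
 */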
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006243static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006244{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006245 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006246 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006247 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006248
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006249 timeout = io_prep_linked_timeout(req);
6250 if (timeout)
6251 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006252
Jens Axboe4014d942021-01-19 15:53:54 -07006253 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006254 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006255
Jens Axboe561fb042019-10-24 07:25:42 -06006256 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006257 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006258 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006259 /*
6260 * We can get EAGAIN for polled IO even though we're
6261 * forcing a sync submission from here, since we can't
6262 * wait for request slots on the block side.
6263 */
6264 if (ret != -EAGAIN)
6265 break;
6266 cond_resched();
6267 } while (1);
6268 }
Jens Axboe31b51512019-01-18 22:56:34 -07006269
Pavel Begunkova3df76982021-02-18 22:32:52 +00006270 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006271 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006272 /* io-wq is going to take one down */
Jens Axboede9b4cc2021-02-24 13:28:27 -07006273 req_ref_get(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00006274 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006275 }
Jens Axboe31b51512019-01-18 22:56:34 -07006276}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006277
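/*
 * Fixed file table entries pack per-file capability bits into the low bits of
 * the file pointer, e.g. a regular file supporting async reads is stored as
 * (file | FFS_ASYNC_READ | FFS_ISREG); FFS_MASK recovers the pointer itself.
 */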
Jens Axboe7b29f922021-03-12 08:30:14 -07006278#define FFS_ASYNC_READ 0x1UL
6279#define FFS_ASYNC_WRITE 0x2UL
6280#ifdef CONFIG_64BIT
6281#define FFS_ISREG 0x4UL
6282#else
6283#define FFS_ISREG 0x0UL
6284#endif
6285#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
6286
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006287static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006288 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006289{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006290 struct io_fixed_file *table_l2;
Jens Axboe65e19f52019-10-26 07:20:21 -06006291
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006292 table_l2 = table->files[i >> IORING_FILE_TABLE_SHIFT];
6293 return &table_l2[i & IORING_FILE_TABLE_MASK];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006294}
6295
Jens Axboe09bb8392019-03-13 12:39:28 -06006296static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6297 int index)
6298{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006299 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06006300
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006301 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006302}
6303
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006304static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006305{
6306 unsigned long file_ptr = (unsigned long) file;
6307
6308 if (__io_file_supports_async(file, READ))
6309 file_ptr |= FFS_ASYNC_READ;
6310 if (__io_file_supports_async(file, WRITE))
6311 file_ptr |= FFS_ASYNC_WRITE;
6312 if (S_ISREG(file_inode(file)->i_mode))
6313 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006314 file_slot->file_ptr = file_ptr;
Jens Axboe09bb8392019-03-13 12:39:28 -06006315}
6316
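/*
 * Resolve the request's file: either pull it from the fixed file table,
 * folding the packed FFS bits into the request flags and pinning the rsrc
 * node, or look the fd up normally and track io_uring files as inflight.
 */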
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006317static struct file *io_file_get(struct io_submit_state *state,
6318 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006319{
6320 struct io_ring_ctx *ctx = req->ctx;
6321 struct file *file;
6322
6323 if (fixed) {
Jens Axboe7b29f922021-03-12 08:30:14 -07006324 unsigned long file_ptr;
6325
Pavel Begunkov479f5172020-10-10 18:34:07 +01006326 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006327 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006328 fd = array_index_nospec(fd, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006329 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
Jens Axboe7b29f922021-03-12 08:30:14 -07006330 file = (struct file *) (file_ptr & FFS_MASK);
6331 file_ptr &= ~FFS_MASK;
6332 /* mask in overlapping REQ_F and FFS bits */
6333 req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01006334 io_req_set_rsrc_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006335 } else {
6336 trace_io_uring_file_get(ctx, fd);
6337 file = __io_file_get(state, fd);
Jens Axboed44f5542021-03-12 08:27:05 -07006338
6339 /* we don't allow fixed io_uring files */
6340 if (file && unlikely(file->f_op == &io_uring_fops))
6341 io_req_track_inflight(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006342 }
6343
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006344 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006345}
6346
Jens Axboe2665abf2019-11-05 12:40:47 -07006347static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6348{
Jens Axboead8a48a2019-11-15 08:49:11 -07006349 struct io_timeout_data *data = container_of(timer,
6350 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006351 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006352 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006353 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006354
6355 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006356 prev = req->timeout.head;
6357 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006358
6359 /*
6360	 * We don't expect the list to be empty; that will only happen if we
6361 * race with the completion of the linked work.
6362 */
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006363 if (prev) {
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006364 io_remove_next_linked(prev);
Pavel Begunkov447c19f2021-05-14 12:02:50 +01006365 if (!req_ref_inc_not_zero(prev))
6366 prev = NULL;
6367 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006368 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6369
6370 if (prev) {
Pavel Begunkov014db002020-03-03 21:33:12 +03006371 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006372 io_put_req_deferred(prev, 1);
Pavel Begunkova2982322021-05-07 21:06:38 +01006373 io_put_req_deferred(req, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006374 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006375 io_req_complete_post(req, -ETIME, 0);
Jens Axboe2665abf2019-11-05 12:40:47 -07006376 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006377 return HRTIMER_NORESTART;
6378}
6379
Pavel Begunkovde968c12021-03-19 17:22:33 +00006380static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006381{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006382 struct io_ring_ctx *ctx = req->ctx;
6383
6384 spin_lock_irq(&ctx->completion_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006385 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006386 * If the back reference is NULL, then our linked request finished
6387	 * before we got a chance to set up the timer.
Jens Axboe76a46e02019-11-10 23:34:16 -07006388 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006389 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006390 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006391
Jens Axboead8a48a2019-11-15 08:49:11 -07006392 data->timer.function = io_link_timeout_fn;
6393 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6394 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006395 }
Jens Axboe76a46e02019-11-10 23:34:16 -07006396 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006397 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006398 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006399}
6400
Jens Axboead8a48a2019-11-15 08:49:11 -07006401static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006402{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006403 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006404
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006405 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6406 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006407 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006408
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006409 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006410 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006411 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006412 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006413}
6414
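/*
 * Issue from the submission path. Inline completions are batched in the
 * ctx submit state and flushed when full; on -EAGAIN we try to arm async
 * poll and otherwise punt to io-wq; a prepared linked timeout is queued last.
 */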
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006415static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006416{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006417 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006418 int ret;
6419
Olivier Langlois59b735a2021-06-22 05:17:39 -07006420issue_sqe:
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006421 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006422
6423 /*
6424 * We async punt it if the file wasn't marked NOWAIT, or if the file
6425 * doesn't support non-blocking read/write attempts
6426 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006427 if (likely(!ret)) {
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006428 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006429 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006430 struct io_ring_ctx *ctx = req->ctx;
6431 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006432
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006433 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006434 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01006435 io_submit_flush_completions(ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006436 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006437 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006438 }
Pavel Begunkov18400382021-03-19 17:22:34 +00006439 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Olivier Langlois59b735a2021-06-22 05:17:39 -07006440 switch (io_arm_poll_handler(req)) {
6441 case IO_APOLL_READY:
6442 goto issue_sqe;
6443 case IO_APOLL_ABORTED:
Pavel Begunkov18400382021-03-19 17:22:34 +00006444 /*
6445 * Queued up for async execution, worker will release
6446 * submit reference when the iocb is actually submitted.
6447 */
6448 io_queue_async_work(req);
Olivier Langlois59b735a2021-06-22 05:17:39 -07006449 break;
Pavel Begunkov18400382021-03-19 17:22:34 +00006450 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006451 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006452 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006453 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006454 if (linked_timeout)
6455 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006456}
6457
Pavel Begunkov441b8a72021-06-14 23:37:31 +01006458static inline void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006459{
Pavel Begunkov10c66902021-06-15 16:47:56 +01006460 if (unlikely(req->ctx->drain_active) && io_drain_req(req))
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006461 return;
Jackie Liu4fe2c962019-09-09 20:50:40 +08006462
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006463 if (likely(!(req->flags & REQ_F_FORCE_ASYNC))) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006464 __io_queue_sqe(req);
Pavel Begunkov76cc33d2021-06-14 23:37:30 +01006465 } else {
6466 int ret = io_req_prep_async(req);
6467
6468 if (unlikely(ret))
6469 io_req_complete_failed(req, ret);
6470 else
6471 io_queue_async_work(req);
Jens Axboece35a472019-12-17 08:04:44 -07006472 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006473}
6474
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006475/*
6476 * Check SQE restrictions (opcode and flags).
6477 *
6478 * Returns 'true' if SQE is allowed, 'false' otherwise.
6479 */
6480static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6481 struct io_kiocb *req,
6482 unsigned int sqe_flags)
6483{
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01006484 if (likely(!ctx->restricted))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006485 return true;
6486
6487 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6488 return false;
6489
6490 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6491 ctx->restrictions.sqe_flags_required)
6492 return false;
6493
6494 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6495 ctx->restrictions.sqe_flags_required))
6496 return false;
6497
6498 return true;
6499}
6500
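/*
 * Initialise a freshly allocated io_kiocb from its sqe: opcode, flags,
 * user_data, task and refs; enforce flag validity and ring restrictions,
 * resolve personality creds, maybe start a plug, and grab the file if the
 * opcode needs one.
 */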
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006501static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006502 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006503{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006504 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006505 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006506 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006507
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006508 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006509 /* same numerical values with corresponding REQ_F_*, safe to copy */
6510 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006511 req->user_data = READ_ONCE(sqe->user_data);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006512 req->file = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006513 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006514 /* one is dropped after submission, the other at completion */
Jens Axboeabc54d62021-02-24 13:32:30 -07006515 atomic_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006516 req->task = current;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006517
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006518 /* enforce forwards compatibility on users */
Pavel Begunkovdddca222021-04-27 16:13:52 +01006519 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006520 return -EINVAL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006521 if (unlikely(req->opcode >= IORING_OP_LAST))
6522 return -EINVAL;
Pavel Begunkov4cfb25b2021-06-26 21:40:47 +01006523 if (!io_check_restriction(ctx, req, sqe_flags))
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006524 return -EACCES;
6525
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006526 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6527 !io_op_defs[req->opcode].buffer_select)
6528 return -EOPNOTSUPP;
Pavel Begunkov3c199662021-06-15 16:47:57 +01006529 if (unlikely(sqe_flags & IOSQE_IO_DRAIN))
6530 ctx->drain_active = true;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006531
Jens Axboe003e8dc2021-03-06 09:22:27 -07006532 personality = READ_ONCE(sqe->personality);
6533 if (personality) {
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006534 req->creds = xa_load(&ctx->personalities, personality);
6535 if (!req->creds)
Jens Axboe003e8dc2021-03-06 09:22:27 -07006536 return -EINVAL;
Pavel Begunkovc10d1f92021-06-17 18:14:01 +01006537 get_cred(req->creds);
Pavel Begunkovb8e64b52021-06-17 18:14:02 +01006538 req->flags |= REQ_F_CREDS;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006539 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006540 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006541
Jens Axboe27926b62020-10-28 09:33:23 -06006542 /*
6543 * Plug now if we have more than 1 IO left after this, and the target
6544 * is potentially a read/write to block based storage.
6545	 * is potentially a read/write to block-based storage.
6546 if (!state->plug_started && state->ios_left > 1 &&
6547 io_op_defs[req->opcode].plug) {
6548 blk_start_plug(&state->plug);
6549 state->plug_started = true;
6550 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006551
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006552 if (io_op_defs[req->opcode].needs_file) {
6553 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006554
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006555 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006556 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006557 ret = -EBADF;
6558 }
6559
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006560 state->ios_left--;
6561 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006562}
6563
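/*
 * Init, prep and queue one request while maintaining link state: a failed
 * init fails the whole pending link, linked requests are prepped for async
 * and chained, and the head is queued once the link terminates.
 */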
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006564static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006565 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006566{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006567 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006568 int ret;
6569
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006570 ret = io_init_req(ctx, req, sqe);
6571 if (unlikely(ret)) {
6572fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006573 if (link->head) {
6574 /* fail even hard links since we don't submit */
Pavel Begunkov93d2bcd2021-05-16 22:58:05 +01006575 req_set_fail(link->head);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006576 io_req_complete_failed(link->head, -ECANCELED);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006577 link->head = NULL;
6578 }
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006579 io_req_complete_failed(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006580 return ret;
6581 }
Pavel Begunkov441b8a72021-06-14 23:37:31 +01006582
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006583 ret = io_req_prep(req, sqe);
6584 if (unlikely(ret))
6585 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006586
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006587 /* don't need @sqe from now on */
Olivier Langlois236daeae2021-05-31 02:36:37 -04006588 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
6589 req->flags, true,
6590 ctx->flags & IORING_SETUP_SQPOLL);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006591
Jens Axboe6c271ce2019-01-10 11:22:30 -07006592 /*
6593 * If we already have a head request, queue this one for async
6594 * submittal once the head completes. If we don't have a head but
6595 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6596 * submitted sync once the chain is complete. If none of those
6597 * conditions are true (normal request), then just queue it.
6598 */
6599 if (link->head) {
6600 struct io_kiocb *head = link->head;
6601
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006602 ret = io_req_prep_async(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006603 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006604 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006605 trace_io_uring_link(ctx, req, head);
6606 link->last->link = req;
6607 link->last = req;
6608
6609 /* last request of a link, enqueue the link */
6610 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
6611 link->head = NULL;
Pavel Begunkov5e159202021-06-14 23:37:26 +01006612 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006613 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006614 } else {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006615 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006616 link->head = req;
6617 link->last = req;
6618 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006619 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006620 }
6621 }
6622
6623 return 0;
6624}
6625
6626/*
6627 * Batched submission is done, ensure local IO is flushed out.
6628	 * Batched submission is done; ensure local IO is flushed out.
6629static void io_submit_state_end(struct io_submit_state *state,
6630 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006631{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006632 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006633 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006634 if (state->comp.nr)
Pavel Begunkov2a2758f2021-06-17 18:14:00 +01006635 io_submit_flush_completions(ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006636 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006637 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006638 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006639}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006640
Jens Axboe9e645e112019-05-10 16:07:28 -06006641/*
6642 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006643 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006644static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006645 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006646{
6647 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006648 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006649 /* set only head, no need to init link_last in advance */
6650 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006651}
6652
Jens Axboe193155c2020-02-22 23:22:19 -07006653static void io_commit_sqring(struct io_ring_ctx *ctx)
6654{
Jens Axboe75c6a032020-01-28 10:15:23 -07006655 struct io_rings *rings = ctx->rings;
6656
6657 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006658 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006659 * since once we write the new head, the application could
6660 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006661 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006662 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006663}
6664
Jens Axboe9e645e112019-05-10 16:07:28 -06006665/*
Fam Zhengdd9ae8a2021-06-04 17:42:56 +01006666 * Fetch an sqe, if one is available. Note this returns a pointer to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006667 * that is mapped by userspace. This means that care needs to be taken to
6668 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006669 * being a good citizen. If members of the sqe are validated and then later
6670 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006671 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006672 */
6673static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006674{
Pavel Begunkovea5ab3b2021-05-16 22:58:09 +01006675 unsigned head, mask = ctx->sq_entries - 1;
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006676 unsigned sq_idx = ctx->cached_sq_head++ & mask;
Jens Axboe9e645e112019-05-10 16:07:28 -06006677
6678 /*
6679 * The cached sq head (or cq tail) serves two purposes:
6680 *
6681 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006682	 *    head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006683 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006684 * though the application is the one updating it.
6685 */
Pavel Begunkov17d3aeb2021-06-14 23:37:23 +01006686 head = READ_ONCE(ctx->sq_array[sq_idx]);
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006687 if (likely(head < ctx->sq_entries))
6688 return &ctx->sq_sqes[head];
6689
6690 /* drop invalid entries */
Pavel Begunkov15641e42021-06-14 23:37:24 +01006691 ctx->cq_extra--;
6692 WRITE_ONCE(ctx->rings->sq_dropped,
6693 READ_ONCE(ctx->rings->sq_dropped) + 1);
Pavel Begunkov711be032020-01-17 03:57:59 +03006694 return NULL;
6695}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006696
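/*
 * Submit up to @nr SQEs from the SQ ring. Ring and task references are taken
 * in bulk up front (topping up the cached tctx refs); whatever is left unused
 * on early exit is handed back before committing the SQ head.
 */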
Jens Axboe0f212202020-09-13 13:09:39 -06006697static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006698{
Pavel Begunkov09899b12021-06-14 02:36:22 +01006699 struct io_uring_task *tctx;
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006700 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006701
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006702 /* make sure SQ entry isn't read before tail */
6703 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006704 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6705 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006706
Pavel Begunkov09899b12021-06-14 02:36:22 +01006707 tctx = current->io_uring;
6708 tctx->cached_refs -= nr;
6709 if (unlikely(tctx->cached_refs < 0)) {
6710 unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
6711
6712 percpu_counter_add(&tctx->inflight, refill);
6713 refcount_add(refill, &current->usage);
6714 tctx->cached_refs += refill;
6715 }
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006716 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006717
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006718 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006719 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006720 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006721
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006722 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006723 if (unlikely(!req)) {
6724 if (!submitted)
6725 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006726 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006727 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006728 sqe = io_get_sqe(ctx);
6729 if (unlikely(!sqe)) {
6730 kmem_cache_free(req_cachep, req);
6731 break;
6732 }
Jens Axboed3656342019-12-18 09:50:26 -07006733 /* will complete beyond this point, count as submitted */
6734 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006735 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006736 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006737 }
6738
Pavel Begunkov9466f432020-01-25 22:34:01 +03006739 if (unlikely(submitted != nr)) {
6740 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006741 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006742
Pavel Begunkov09899b12021-06-14 02:36:22 +01006743 current->io_uring->cached_refs += unused;
Jens Axboed8a6df12020-10-15 16:24:45 -06006744 percpu_ref_put_many(&ctx->refs, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006745 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006746
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006747 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006748 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6749 io_commit_sqring(ctx);
6750
Jens Axboe6c271ce2019-01-10 11:22:30 -07006751 return submitted;
6752}
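
/*
 * A worked example of the tctx ref batching at the top of
 * io_submit_sqes() (the IO_TCTX_REFS_CACHE_NR value is assumed here
 * for illustration): with a cache size of 1024, tctx->cached_refs == 3,
 * and a 32-SQE submit, cached_refs drops to -29 and triggers a refill:
 *
 *	refill = -(-29) + 1024 = 1053;
 *	percpu_counter_add(&tctx->inflight, 1053);
 *	refcount_add(1053, &current->usage);
 *	tctx->cached_refs = -29 + 1053 = 1024;
 *
 * so one percpu update and one refcount bump cover roughly the next
 * thousand requests instead of one pair per request.
 */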
6753
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006754static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
6755{
6756 return READ_ONCE(sqd->state);
6757}
6758
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006759static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6760{
6761 /* Tell userspace we may need a wakeup call */
6762 spin_lock_irq(&ctx->completion_lock);
6763 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6764 spin_unlock_irq(&ctx->completion_lock);
6765}
6766
6767static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6768{
6769 spin_lock_irq(&ctx->completion_lock);
6770 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6771 spin_unlock_irq(&ctx->completion_lock);
6772}
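
/*
 * The userspace counterpart of the two helpers above (a minimal raw
 * sketch, not lifted from liburing): after publishing a new SQ tail,
 * an SQPOLL submitter must check the flags word and kick an idle
 * thread, since tail writes alone won't wake it:
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *
 * where sq_flags is assumed to point at the mmap'ed sq_ring flags
 * field and the load is a READ_ONCE()-style atomic read.
 */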
6773
Xiaoguang Wang08369242020-11-03 14:15:59 +08006774static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006775{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006776 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006777 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006778
Jens Axboec8d1ba52020-09-14 11:07:26 -06006779 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006780 /* if we're handling multiple rings, cap submit size for fairness */
Olivier Langlois4ce8ad92021-06-23 11:50:18 -07006781 if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
6782 to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
Jens Axboee95eee22020-09-08 09:11:32 -06006783
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006784 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6785 unsigned nr_events = 0;
Pavel Begunkov948e1942021-06-24 15:09:55 +01006786 const struct cred *creds = NULL;
6787
6788 if (ctx->sq_creds != current_cred())
6789 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006790
Xiaoguang Wang08369242020-11-03 14:15:59 +08006791 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006792 if (!list_empty(&ctx->iopoll_list))
6793 io_do_iopoll(ctx, &nr_events, 0);
6794
Pavel Begunkov3b763ba2021-04-18 14:52:08 +01006795 /*
6796 * Don't submit if refs are dying, good for io_uring_register(),
 6796	 * but it is also relied upon by io_ring_exit_work()
6798 */
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006799 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6800 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006801 ret = io_submit_sqes(ctx, to_submit);
6802 mutex_unlock(&ctx->uring_lock);
Jens Axboe90554202020-09-03 12:12:41 -06006803
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006804 if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
6805 wake_up(&ctx->sqo_sq_wait);
Pavel Begunkov948e1942021-06-24 15:09:55 +01006806 if (creds)
6807 revert_creds(creds);
Pavel Begunkovacfb3812021-05-16 22:58:03 +01006808 }
Jens Axboe90554202020-09-03 12:12:41 -06006809
Xiaoguang Wang08369242020-11-03 14:15:59 +08006810 return ret;
6811}
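
/*
 * Fairness illustration for the cap above: with one SQPOLL thread
 * driving two rings, A holding many pending SQEs and B holding one,
 * each pass over sqd->ctx_list submits at most
 * IORING_SQPOLL_CAP_ENTRIES_VALUE entries per ring:
 *
 *	pass 1: A submits the cap, B submits its 1
 *	pass 2: A submits the next batch, B has nothing to do
 *
 * so a busy ring cannot starve its siblings. With a single ring,
 * cap_entries is false and the cap is skipped.
 */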
6812
6813static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6814{
6815 struct io_ring_ctx *ctx;
6816 unsigned sq_thread_idle = 0;
6817
Pavel Begunkovc9dca272021-03-10 13:13:55 +00006818 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6819 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006820 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006821}
6822
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006823static bool io_sqd_handle_event(struct io_sq_data *sqd)
6824{
6825 bool did_sig = false;
6826 struct ksignal ksig;
6827
6828 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6829 signal_pending(current)) {
6830 mutex_unlock(&sqd->lock);
6831 if (signal_pending(current))
6832 did_sig = get_signal(&ksig);
6833 cond_resched();
6834 mutex_lock(&sqd->lock);
6835 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006836 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
6837}
6838
Jens Axboe6c271ce2019-01-10 11:22:30 -07006839static int io_sq_thread(void *data)
6840{
Jens Axboe69fb2132020-09-14 11:16:23 -06006841 struct io_sq_data *sqd = data;
6842 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006843 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006844 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006845 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006846
Pavel Begunkov696ee882021-04-01 09:55:04 +01006847 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006848 set_task_comm(current, buf);
Jens Axboe28cea78a2020-09-14 10:51:17 -06006849
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006850 if (sqd->sq_cpu != -1)
6851 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6852 else
6853 set_cpus_allowed_ptr(current, cpu_online_mask);
6854 current->flags |= PF_NO_SETAFFINITY;
6855
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006856 mutex_lock(&sqd->lock);
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006857 while (1) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01006858 bool cap_entries, sqt_spin = false;
Jens Axboec1edbf52019-11-10 16:56:04 -07006859
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006860 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
6861 if (io_sqd_handle_event(sqd))
Pavel Begunkovc7d95612021-04-13 11:43:00 +01006862 break;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006863 timeout = jiffies + sqd->sq_thread_idle;
6864 }
Pavel Begunkove4b6d902021-05-16 22:58:00 +01006865
Jens Axboee95eee22020-09-08 09:11:32 -06006866 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006867 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkov948e1942021-06-24 15:09:55 +01006868 int ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006869
Xiaoguang Wang08369242020-11-03 14:15:59 +08006870 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6871 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006872 }
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01006873 if (io_run_task_work())
6874 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006875
Xiaoguang Wang08369242020-11-03 14:15:59 +08006876 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006877 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006878 if (sqt_spin)
6879 timeout = jiffies + sqd->sq_thread_idle;
6880 continue;
6881 }
6882
Xiaoguang Wang08369242020-11-03 14:15:59 +08006883 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
Pavel Begunkovdd432ea52021-06-26 21:40:45 +01006884 if (!io_sqd_events_pending(sqd) && !current->task_works) {
Pavel Begunkov1a924a82021-06-24 15:09:56 +01006885 bool needs_sched = true;
6886
Hao Xu724cb4f2021-04-21 23:19:11 +08006887 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Pavel Begunkovaaa9f0f2021-05-16 22:58:01 +01006888 io_ring_set_wakeup_flag(ctx);
6889
Hao Xu724cb4f2021-04-21 23:19:11 +08006890 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6891 !list_empty_careful(&ctx->iopoll_list)) {
6892 needs_sched = false;
6893 break;
6894 }
6895 if (io_sqring_entries(ctx)) {
6896 needs_sched = false;
6897 break;
6898 }
6899 }
6900
6901 if (needs_sched) {
6902 mutex_unlock(&sqd->lock);
6903 schedule();
6904 mutex_lock(&sqd->lock);
6905 }
Jens Axboe69fb2132020-09-14 11:16:23 -06006906 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6907 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006908 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006909
6910 finish_wait(&sqd->wait, &wait);
6911 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006912 }
6913
Pavel Begunkov78cc6872021-06-14 02:36:23 +01006914 io_uring_cancel_generic(true, sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006915 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006916 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006917 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006918 io_run_task_work();
Pavel Begunkov734551d2021-04-18 14:52:09 +01006919 mutex_unlock(&sqd->lock);
6920
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006921 complete(&sqd->exited);
6922 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006923}
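
/*
 * The control flow of io_sq_thread() above, as a rough sketch:
 *
 *	while (1) {
 *		if (park/stop/signal pending)
 *			handle it, possibly break;
 *		for each ctx: __io_sq_thread(ctx, cap_entries);
 *		run task_work;
 *		if (made progress || idle period not yet elapsed)
 *			continue;
 *		set IORING_SQ_NEED_WAKEUP on every ctx;
 *		if (still no SQEs and no pending events)
 *			schedule();
 *		clear IORING_SQ_NEED_WAKEUP on every ctx;
 *	}
 *
 * i.e. the thread busy-polls for sq_thread_idle jiffies after the last
 * useful work before it is willing to sleep.
 */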
6924
Jens Axboebda52162019-09-24 13:47:15 -06006925struct io_wait_queue {
6926 struct wait_queue_entry wq;
6927 struct io_ring_ctx *ctx;
6928 unsigned to_wait;
6929 unsigned nr_timeouts;
6930};
6931
Pavel Begunkov6c503152021-01-04 20:36:36 +00006932static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006933{
6934 struct io_ring_ctx *ctx = iowq->ctx;
6935
6936 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006937 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006938 * started waiting. For timeouts, we always want to return to userspace,
6939 * regardless of event count.
6940 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006941 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006942 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6943}
6944
6945static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6946 int wake_flags, void *key)
6947{
6948 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6949 wq);
6950
Pavel Begunkov6c503152021-01-04 20:36:36 +00006951 /*
 6952	 * Cannot safely flush overflowed CQEs from here, so just ensure we
 6953	 * wake up the task; the next invocation will do the flush.
6954 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01006955 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow))
Pavel Begunkov6c503152021-01-04 20:36:36 +00006956 return autoremove_wake_function(curr, mode, wake_flags, key);
6957 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006958}
6959
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006960static int io_run_task_work_sig(void)
6961{
6962 if (io_run_task_work())
6963 return 1;
6964 if (!signal_pending(current))
6965 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06006966 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06006967 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006968 return -EINTR;
6969}
6970
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006971/* when this returns >0, the caller should retry */
6972static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6973 struct io_wait_queue *iowq,
6974 signed long *timeout)
6975{
6976 int ret;
6977
6978 /* make sure we run task_work before checking for signals */
6979 ret = io_run_task_work_sig();
6980 if (ret || io_should_wake(iowq))
6981 return ret;
6982 /* let the caller flush overflows, retry */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01006983 if (test_bit(0, &ctx->check_cq_overflow))
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006984 return 1;
6985
6986 *timeout = schedule_timeout(*timeout);
6987 return !*timeout ? -ETIME : 1;
6988}
6989
Jens Axboe2b188cc2019-01-07 10:46:33 -07006990/*
6991 * Wait until events become available, if we don't already have some. The
6992 * application must reap them itself, as they reside on the shared cq ring.
6993 */
6994static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08006995 const sigset_t __user *sig, size_t sigsz,
6996 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006997{
Jens Axboebda52162019-09-24 13:47:15 -06006998 struct io_wait_queue iowq = {
6999 .wq = {
7000 .private = current,
7001 .func = io_wake_function,
7002 .entry = LIST_HEAD_INIT(iowq.wq.entry),
7003 },
7004 .ctx = ctx,
7005 .to_wait = min_events,
7006 };
Hristo Venev75b28af2019-08-26 17:23:46 +00007007 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007008 signed long timeout = MAX_SCHEDULE_TIMEOUT;
7009 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007010
Jens Axboeb41e9852020-02-17 09:52:41 -07007011 do {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00007012 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00007013 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07007014 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06007015 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07007016 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07007017 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007018
7019 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007020#ifdef CONFIG_COMPAT
7021 if (in_compat_syscall())
7022 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07007023 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007024 else
7025#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07007026 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01007027
Jens Axboe2b188cc2019-01-07 10:46:33 -07007028 if (ret)
7029 return ret;
7030 }
7031
Hao Xuc73ebb62020-11-03 10:54:37 +08007032 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00007033 struct timespec64 ts;
7034
Hao Xuc73ebb62020-11-03 10:54:37 +08007035 if (get_timespec64(&ts, uts))
7036 return -EFAULT;
7037 timeout = timespec64_to_jiffies(&ts);
7038 }
7039
Jens Axboebda52162019-09-24 13:47:15 -06007040 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02007041 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06007042 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07007043 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00007044 if (!io_cqring_overflow_flush(ctx, false)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07007045 ret = -EBUSY;
7046 break;
7047 }
Pavel Begunkov311997b2021-06-14 23:37:28 +01007048 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
Jens Axboebda52162019-09-24 13:47:15 -06007049 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007050 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
Pavel Begunkov311997b2021-06-14 23:37:28 +01007051 finish_wait(&ctx->cq_wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007052 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007053 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007054
Jens Axboeb7db41c2020-07-04 08:55:50 -06007055 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007056
Hristo Venev75b28af2019-08-26 17:23:46 +00007057 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007058}
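
/*
 * io_cqring_wait() is the backend of io_uring_enter(2) with
 * IORING_ENTER_GETEVENTS. A minimal userspace sketch (raw syscall,
 * error handling elided) that blocks for at least one completion:
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		      IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * min_events above maps to the syscall's min_complete argument; the
 * uts timeout arrives via IORING_ENTER_EXT_ARG on newer kernels.
 */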
7059
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007060static void io_free_page_table(void **table, size_t size)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007061{
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007062 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007063
7064 for (i = 0; i < nr_tables; i++)
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007065 kfree(table[i]);
7066 kfree(table);
7067}
7068
7069static void **io_alloc_page_table(size_t size)
7070{
7071 unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
7072 size_t init_size = size;
7073 void **table;
7074
7075 table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL);
7076 if (!table)
7077 return NULL;
7078
7079 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov27f6b312021-06-15 13:20:13 +01007080 unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007081
7082 table[i] = kzalloc(this_size, GFP_KERNEL);
7083 if (!table[i]) {
7084 io_free_page_table(table, init_size);
7085 return NULL;
7086 }
7087 size -= this_size;
7088 }
7089 return table;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007090}
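
/*
 * Sizing example for the helpers above: a request for size == 80000
 * bytes with 4 KiB pages gives
 *
 *	nr_tables = DIV_ROUND_UP(80000, 4096) = 20
 *
 * i.e. nineteen full 4096-byte chunks plus a final chunk of
 * 80000 - 19 * 4096 = 2176 bytes, since each iteration allocates
 * min(size, PAGE_SIZE) and subtracts what it consumed.
 */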
7091
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007092static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007093{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007094 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00007095}
7096
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007097static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07007098{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007099 spin_unlock_bh(&ctx->rsrc_ref_lock);
7100}
7101
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007102static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7103{
7104 percpu_ref_exit(&ref_node->refs);
7105 kfree(ref_node);
7106}
7107
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007108static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7109 struct io_rsrc_data *data_to_kill)
Jens Axboe6b063142019-01-10 22:13:58 -07007110{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007111 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7112 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007113
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007114 if (data_to_kill) {
7115 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007116
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007117 rsrc_node->rsrc_data = data_to_kill;
7118 io_rsrc_ref_lock(ctx);
7119 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
7120 io_rsrc_ref_unlock(ctx);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007121
Pavel Begunkov3e942492021-04-11 01:46:34 +01007122 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007123 percpu_ref_kill(&rsrc_node->refs);
7124 ctx->rsrc_node = NULL;
7125 }
7126
7127 if (!ctx->rsrc_node) {
7128 ctx->rsrc_node = ctx->rsrc_backup_node;
7129 ctx->rsrc_backup_node = NULL;
7130 }
Jens Axboe6b063142019-01-10 22:13:58 -07007131}
7132
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007133static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007134{
7135 if (ctx->rsrc_backup_node)
7136 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007137 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007138 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7139}
7140
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007141static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007142{
7143 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007144
Pavel Begunkov215c3902021-04-01 15:43:48 +01007145	/* As we may drop ->uring_lock, another task may have started a quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007146 if (data->quiesce)
7147 return -ENXIO;
7148
7149 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007150 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007151 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007152 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007153 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007154 io_rsrc_node_switch(ctx, data);
7155
Pavel Begunkov3e942492021-04-11 01:46:34 +01007156 /* kill initial ref, already quiesced if zero */
7157 if (atomic_dec_and_test(&data->refs))
7158 break;
Hao Xu8bad28d2021-02-19 17:19:36 +08007159 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007160 ret = wait_for_completion_interruptible(&data->done);
7161 if (!ret)
7162 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007163
Pavel Begunkov3e942492021-04-11 01:46:34 +01007164 atomic_inc(&data->refs);
 7165		/* wait for all work items potentially completing data->done */
7166 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007167 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007168
Hao Xu8bad28d2021-02-19 17:19:36 +08007169 mutex_unlock(&ctx->uring_lock);
7170 ret = io_run_task_work_sig();
7171 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007172 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007173 data->quiesce = false;
7174
Hao Xu8bad28d2021-02-19 17:19:36 +08007175 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007176}
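
/*
 * The quiesce above, step by step:
 *
 *	1) switch to a fresh rsrc node, killing the old one so no new
 *	   requests pin the data through it;
 *	2) drop the initial data->refs; if it hits zero we are already
 *	   quiesced;
 *	3) otherwise wait for data->done, re-running task_work (which
 *	   may be what holds the remaining refs) with ->uring_lock
 *	   dropped;
 *	4) if the wait is interrupted, re-take the initial ref and loop
 *	   until completion or a fatal signal.
 */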
7177
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007178static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7179{
7180 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
7181 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
7182
7183 return &data->tags[table_idx][off];
7184}
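
/*
 * Tag lookup example: tags are u64s stored in the page-sized chunks
 * handed out by io_alloc_page_table(), so with 4 KiB pages each chunk
 * holds 512 tags and the shift works out to 9. For idx == 1000:
 *
 *	table_idx = 1000 >> 9  = 1;
 *	off       = 1000 & 511 = 488;
 *	tag       = data->tags[1][488];
 */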
7185
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007186static void io_rsrc_data_free(struct io_rsrc_data *data)
7187{
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007188 size_t size = data->nr * sizeof(data->tags[0][0]);
7189
7190 if (data->tags)
7191 io_free_page_table((void **)data->tags, size);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007192 kfree(data);
7193}
7194
Pavel Begunkovd878c812021-06-14 02:36:18 +01007195static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put,
7196 u64 __user *utags, unsigned nr,
7197 struct io_rsrc_data **pdata)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007198{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007199 struct io_rsrc_data *data;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007200 int ret = -ENOMEM;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007201 unsigned i;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007202
7203 data = kzalloc(sizeof(*data), GFP_KERNEL);
7204 if (!data)
Pavel Begunkovd878c812021-06-14 02:36:18 +01007205 return -ENOMEM;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007206 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007207 if (!data->tags) {
7208 kfree(data);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007209 return -ENOMEM;
7210 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007211
7212 data->nr = nr;
7213 data->ctx = ctx;
7214 data->do_put = do_put;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007215 if (utags) {
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007216 ret = -EFAULT;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007217 for (i = 0; i < nr; i++) {
Colin Ian Kingfdd1dc32021-06-15 14:00:11 +01007218 u64 *tag_slot = io_get_tag_slot(data, i);
7219
7220 if (copy_from_user(tag_slot, &utags[i],
7221 sizeof(*tag_slot)))
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007222 goto fail;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007223 }
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007224 }
7225
Pavel Begunkov3e942492021-04-11 01:46:34 +01007226 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007227 init_completion(&data->done);
Pavel Begunkovd878c812021-06-14 02:36:18 +01007228 *pdata = data;
7229 return 0;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007230fail:
7231 io_rsrc_data_free(data);
7232 return ret;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007233}
7234
Pavel Begunkov9123c8f2021-06-14 02:36:20 +01007235static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
7236{
7237 size_t size = nr_files * sizeof(struct io_fixed_file);
7238
7239 table->files = (struct io_fixed_file **)io_alloc_page_table(size);
7240 return !!table->files;
7241}
7242
7243static void io_free_file_tables(struct io_file_table *table, unsigned nr_files)
7244{
7245 size_t size = nr_files * sizeof(struct io_fixed_file);
7246
7247 io_free_page_table((void **)table->files, size);
7248 table->files = NULL;
7249}
7250
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02007251static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7252{
Jens Axboe06058632019-04-13 09:26:03 -06007253#if defined(CONFIG_UNIX)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007254 if (ctx->ring_sock) {
7255 struct sock *sock = ctx->ring_sock->sk;
7256 struct sk_buff *skb;
7257
7258 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
Jens Axboe6b063142019-01-10 22:13:58 -07007259 kfree_skb(skb);
7260 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007261#else
7262 int i;
Jens Axboe6b063142019-01-10 22:13:58 -07007263
7264 for (i = 0; i < ctx->nr_user_files; i++) {
7265 struct file *file;
7266
7267 file = io_file_from_index(ctx, i);
7268 if (file)
7269 fput(file);
7270 }
7271#endif
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007272 io_free_file_tables(&ctx->file_table, ctx->nr_user_files);
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007273 io_rsrc_data_free(ctx->file_data);
Pavel Begunkovfff4db72021-04-25 14:32:15 +01007274 ctx->file_data = NULL;
7275 ctx->nr_user_files = 0;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007276}
7277
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007278static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7279{
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007280 int ret;
7281
Pavel Begunkov08480402021-04-13 02:58:38 +01007282 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007283 return -ENXIO;
Pavel Begunkov08480402021-04-13 02:58:38 +01007284 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7285 if (!ret)
7286 __io_sqe_files_unregister(ctx);
7287 return ret;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007288}
7289
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007290static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007291 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007292{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007293 WARN_ON_ONCE(sqd->thread == current);
7294
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007295 /*
 7296	 * Do the dance, but don't use a conditional clear_bit(): it'd race
 7297	 * with other threads incrementing park_pending and setting the bit.
7298 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007299 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007300 if (atomic_dec_return(&sqd->park_pending))
7301 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007302 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007303}
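
/*
 * Why the unconditional clear-then-maybe-reset above is safe; consider
 * two tasks parking the same sqd:
 *
 *	A parks:   park_pending 0 -> 1, SHOULD_PARK set
 *	B parks:   park_pending 1 -> 2, bit already set
 *	A unparks: clear bit, atomic_dec_return() == 1, re-set bit
 *	B unparks: clear bit, atomic_dec_return() == 0, bit stays clear
 *
 * The bit is left clear only once the last parker is gone, without the
 * race a "clear only if the counter is zero" check would have.
 */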
7304
Jens Axboe86e0d672021-03-05 08:44:39 -07007305static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007306 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007307{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007308 WARN_ON_ONCE(sqd->thread == current);
7309
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007310 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007311 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007312 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007313 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007314 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007315}
7316
7317static void io_sq_thread_stop(struct io_sq_data *sqd)
7318{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007319 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007320 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007321
Jens Axboe05962f92021-03-06 13:58:48 -07007322 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007323 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07007324 if (sqd->thread)
7325 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007326 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007327 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007328}
7329
Jens Axboe534ca6d2020-09-02 13:52:19 -06007330static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007331{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007332 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007333 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7334
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007335 io_sq_thread_stop(sqd);
7336 kfree(sqd);
7337 }
7338}
7339
7340static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7341{
7342 struct io_sq_data *sqd = ctx->sq_data;
7343
7344 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007345 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007346 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007347 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007348 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007349
7350 io_put_sq_data(sqd);
7351 ctx->sq_data = NULL;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007352 }
7353}
7354
Jens Axboeaa061652020-09-02 14:50:27 -06007355static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7356{
7357 struct io_ring_ctx *ctx_attach;
7358 struct io_sq_data *sqd;
7359 struct fd f;
7360
7361 f = fdget(p->wq_fd);
7362 if (!f.file)
7363 return ERR_PTR(-ENXIO);
7364 if (f.file->f_op != &io_uring_fops) {
7365 fdput(f);
7366 return ERR_PTR(-EINVAL);
7367 }
7368
7369 ctx_attach = f.file->private_data;
7370 sqd = ctx_attach->sq_data;
7371 if (!sqd) {
7372 fdput(f);
7373 return ERR_PTR(-EINVAL);
7374 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007375 if (sqd->task_tgid != current->tgid) {
7376 fdput(f);
7377 return ERR_PTR(-EPERM);
7378 }
Jens Axboeaa061652020-09-02 14:50:27 -06007379
7380 refcount_inc(&sqd->refs);
7381 fdput(f);
7382 return sqd;
7383}
7384
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007385static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7386 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007387{
7388 struct io_sq_data *sqd;
7389
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007390 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007391 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7392 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007393 if (!IS_ERR(sqd)) {
7394 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007395 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007396 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007397	/* fall through for the EPERM case, set up a new sqd/task */
7398 if (PTR_ERR(sqd) != -EPERM)
7399 return sqd;
7400 }
Jens Axboeaa061652020-09-02 14:50:27 -06007401
Jens Axboe534ca6d2020-09-02 13:52:19 -06007402 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7403 if (!sqd)
7404 return ERR_PTR(-ENOMEM);
7405
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007406 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007407 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007408 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007409 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007410 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007411 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007412 return sqd;
7413}
7414
Jens Axboe6b063142019-01-10 22:13:58 -07007415#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007416/*
7417 * Ensure the UNIX gc is aware of our file set, so we are certain that
 7418 * the io_uring can be safely unregistered on process exit, even if
 7419 * there are reference loops among the files.
7420 */
7421static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7422{
7423 struct sock *sk = ctx->ring_sock->sk;
7424 struct scm_fp_list *fpl;
7425 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007426 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007427
Jens Axboe6b063142019-01-10 22:13:58 -07007428 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7429 if (!fpl)
7430 return -ENOMEM;
7431
7432 skb = alloc_skb(0, GFP_KERNEL);
7433 if (!skb) {
7434 kfree(fpl);
7435 return -ENOMEM;
7436 }
7437
7438 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007439
Jens Axboe08a45172019-10-03 08:11:03 -06007440 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007441 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007442 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007443 struct file *file = io_file_from_index(ctx, i + offset);
7444
7445 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007446 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007447 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007448 unix_inflight(fpl->user, fpl->fp[nr_files]);
7449 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007450 }
7451
Jens Axboe08a45172019-10-03 08:11:03 -06007452 if (nr_files) {
7453 fpl->max = SCM_MAX_FD;
7454 fpl->count = nr_files;
7455 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007456 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007457 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7458 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007459
Jens Axboe08a45172019-10-03 08:11:03 -06007460 for (i = 0; i < nr_files; i++)
7461 fput(fpl->fp[i]);
7462 } else {
7463 kfree_skb(skb);
7464 kfree(fpl);
7465 }
Jens Axboe6b063142019-01-10 22:13:58 -07007466
7467 return 0;
7468}
7469
7470/*
7471 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7472 * causes regular reference counting to break down. We rely on the UNIX
7473 * garbage collection to take care of this problem for us.
7474 */
7475static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7476{
7477 unsigned left, total;
7478 int ret = 0;
7479
7480 total = 0;
7481 left = ctx->nr_user_files;
7482 while (left) {
7483 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
7484
7485 ret = __io_sqe_files_scm(ctx, this_files, total);
7486 if (ret)
7487 break;
7488 left -= this_files;
7489 total += this_files;
7490 }
7491
7492 if (!ret)
7493 return 0;
7494
7495 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007496 struct file *file = io_file_from_index(ctx, total);
7497
7498 if (file)
7499 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007500 total++;
7501 }
7502
7503 return ret;
7504}
7505#else
7506static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7507{
7508 return 0;
7509}
7510#endif
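
/*
 * Chunking example for io_sqe_files_scm(): registering 2000 files with
 * SCM_MAX_FD == 253 produces
 *
 *	DIV_ROUND_UP(2000, 253) = 8 skbs: 7 * 253 = 1771 files, then 229
 *
 * and if a middle chunk fails, the loop after the failure fput()s the
 * not-yet-transferred files from total onward, keeping the references
 * taken at registration balanced.
 */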
7511
Pavel Begunkov47e90392021-04-01 15:43:56 +01007512static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007513{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007514 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007515#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007516 struct sock *sock = ctx->ring_sock->sk;
7517 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7518 struct sk_buff *skb;
7519 int i;
7520
7521 __skb_queue_head_init(&list);
7522
7523 /*
7524 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7525 * remove this entry and rearrange the file array.
7526 */
7527 skb = skb_dequeue(head);
7528 while (skb) {
7529 struct scm_fp_list *fp;
7530
7531 fp = UNIXCB(skb).fp;
7532 for (i = 0; i < fp->count; i++) {
7533 int left;
7534
7535 if (fp->fp[i] != file)
7536 continue;
7537
7538 unix_notinflight(fp->user, fp->fp[i]);
7539 left = fp->count - 1 - i;
7540 if (left) {
7541 memmove(&fp->fp[i], &fp->fp[i + 1],
7542 left * sizeof(struct file *));
7543 }
7544 fp->count--;
7545 if (!fp->count) {
7546 kfree_skb(skb);
7547 skb = NULL;
7548 } else {
7549 __skb_queue_tail(&list, skb);
7550 }
7551 fput(file);
7552 file = NULL;
7553 break;
7554 }
7555
7556 if (!file)
7557 break;
7558
7559 __skb_queue_tail(&list, skb);
7560
7561 skb = skb_dequeue(head);
7562 }
7563
7564 if (skb_peek(&list)) {
7565 spin_lock_irq(&head->lock);
7566 while ((skb = __skb_dequeue(&list)) != NULL)
7567 __skb_queue_tail(head, skb);
7568 spin_unlock_irq(&head->lock);
7569 }
7570#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007571 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007572#endif
7573}
7574
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007575static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007576{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007577 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007578 struct io_ring_ctx *ctx = rsrc_data->ctx;
7579 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007580
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007581 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7582 list_del(&prsrc->list);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007583
7584 if (prsrc->tag) {
7585 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007586
7587 io_ring_submit_lock(ctx, lock_ring);
Pavel Begunkov157d2572021-06-14 02:36:19 +01007588 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007589 io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
Pavel Begunkov2840f712021-04-27 16:13:51 +01007590 ctx->cq_extra++;
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007591 io_commit_cqring(ctx);
Pavel Begunkov157d2572021-06-14 02:36:19 +01007592 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007593 io_cqring_ev_posted(ctx);
7594 io_ring_submit_unlock(ctx, lock_ring);
7595 }
7596
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007597 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007598 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007599 }
7600
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007601 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007602 if (atomic_dec_and_test(&rsrc_data->refs))
7603 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007604}
7605
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007606static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007607{
7608 struct io_ring_ctx *ctx;
7609 struct llist_node *node;
7610
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007611 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7612 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007613
7614 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007615 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007616 struct llist_node *next = node->next;
7617
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007618 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007619 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007620 node = next;
7621 }
7622}
7623
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007624static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007625{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007626 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007627 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007628 bool first_add = false;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007629
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007630 io_rsrc_ref_lock(ctx);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007631 node->done = true;
Pavel Begunkove2978222020-11-18 14:56:26 +00007632
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007633 while (!list_empty(&ctx->rsrc_ref_list)) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007634 node = list_first_entry(&ctx->rsrc_ref_list,
7635 struct io_rsrc_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007636 /* recycle ref nodes in order */
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007637 if (!node->done)
Pavel Begunkove2978222020-11-18 14:56:26 +00007638 break;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007639 list_del(&node->node);
7640 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007641 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007642 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007643
Pavel Begunkov3e942492021-04-11 01:46:34 +01007644 if (first_add)
7645 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007646}
7647
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007648static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007649{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007650 struct io_rsrc_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007651
7652 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7653 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007654 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007655
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007656 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007657 0, GFP_KERNEL)) {
7658 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007659 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007660 }
7661 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007662 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007663 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007664 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007665}
7666
Jens Axboe05f3fb32019-12-09 11:22:50 -07007667static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov792e3582021-04-25 14:32:21 +01007668 unsigned nr_args, u64 __user *tags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007669{
7670 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007671 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007672 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007673 unsigned i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007674
7675 if (ctx->file_data)
7676 return -EBUSY;
7677 if (!nr_args)
7678 return -EINVAL;
7679 if (nr_args > IORING_MAX_FIXED_FILES)
7680 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007681 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007682 if (ret)
7683 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01007684 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
7685 &ctx->file_data);
7686 if (ret)
7687 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007688
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007689 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007690 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007691 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007692
Jens Axboe05f3fb32019-12-09 11:22:50 -07007693 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkovd878c812021-06-14 02:36:18 +01007694 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007695 ret = -EFAULT;
7696 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007697 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007698 /* allow sparse sets */
Pavel Begunkov792e3582021-04-25 14:32:21 +01007699 if (fd == -1) {
7700 ret = -EINVAL;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007701 if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
Pavel Begunkov792e3582021-04-25 14:32:21 +01007702 goto out_fput;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007703 continue;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007704 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007705
Jens Axboe05f3fb32019-12-09 11:22:50 -07007706 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007707 ret = -EBADF;
Pavel Begunkov792e3582021-04-25 14:32:21 +01007708 if (unlikely(!file))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007709 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007710
7711 /*
7712 * Don't allow io_uring instances to be registered. If UNIX
7713 * isn't enabled, then this causes a reference cycle and this
7714 * instance can never get freed. If UNIX is enabled we'll
7715 * handle it just fine, but there's still no point in allowing
7716 * a ring fd as it doesn't support regular read/write anyway.
7717 */
7718 if (file->f_op == &io_uring_fops) {
7719 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007720 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007721 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007722 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007723 }
7724
Jens Axboe05f3fb32019-12-09 11:22:50 -07007725 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007726 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01007727 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007728 return ret;
7729 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007730
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007731 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007732 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007733out_fput:
7734 for (i = 0; i < ctx->nr_user_files; i++) {
7735 file = io_file_from_index(ctx, i);
7736 if (file)
7737 fput(file);
7738 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007739 io_free_file_tables(&ctx->file_table, nr_args);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007740 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007741out_free:
Pavel Begunkov44b31f22021-04-25 14:32:16 +01007742 io_rsrc_data_free(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007743 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007744 return ret;
7745}
7746
Jens Axboec3a31e62019-10-03 13:59:56 -06007747static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7748 int index)
7749{
7750#if defined(CONFIG_UNIX)
7751 struct sock *sock = ctx->ring_sock->sk;
7752 struct sk_buff_head *head = &sock->sk_receive_queue;
7753 struct sk_buff *skb;
7754
7755 /*
7756 * See if we can merge this file into an existing skb SCM_RIGHTS
7757 * file set. If there's no room, fall back to allocating a new skb
7758 * and filling it in.
7759 */
7760 spin_lock_irq(&head->lock);
7761 skb = skb_peek(head);
7762 if (skb) {
7763 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7764
7765 if (fpl->count < SCM_MAX_FD) {
7766 __skb_unlink(skb, head);
7767 spin_unlock_irq(&head->lock);
7768 fpl->fp[fpl->count] = get_file(file);
7769 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7770 fpl->count++;
7771 spin_lock_irq(&head->lock);
7772 __skb_queue_head(head, skb);
7773 } else {
7774 skb = NULL;
7775 }
7776 }
7777 spin_unlock_irq(&head->lock);
7778
7779 if (skb) {
7780 fput(file);
7781 return 0;
7782 }
7783
7784 return __io_sqe_files_scm(ctx, 1, index);
7785#else
7786 return 0;
7787#endif
7788}
7789
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007790static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
Pavel Begunkove7c78372021-04-01 15:43:45 +01007791 struct io_rsrc_node *node, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007792{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007793 struct io_rsrc_put *prsrc;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007794
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007795 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7796 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007797 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007798
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007799 prsrc->tag = *io_get_tag_slot(data, idx);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007800 prsrc->rsrc = rsrc;
Pavel Begunkove7c78372021-04-01 15:43:45 +01007801 list_add(&prsrc->list, &node->rsrc_list);
Hillf Dantona5318d32020-03-23 17:47:15 +08007802 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007803}
7804
7805static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007806 struct io_uring_rsrc_update2 *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007807 unsigned nr_args)
7808{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007809 u64 __user *tags = u64_to_user_ptr(up->tags);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007810 __s32 __user *fds = u64_to_user_ptr(up->data);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007811 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007812 struct io_fixed_file *file_slot;
7813 struct file *file;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007814 int fd, i, err = 0;
7815 unsigned int done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007816 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007817
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01007818 if (!ctx->file_data)
7819 return -ENXIO;
7820 if (up->offset + nr_args > ctx->nr_user_files)
Jens Axboec3a31e62019-10-03 13:59:56 -06007821 return -EINVAL;
7822
Pavel Begunkov67973b92021-01-26 13:51:09 +00007823 for (done = 0; done < nr_args; done++) {
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007824 u64 tag = 0;
7825
7826 if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
7827 copy_from_user(&fd, &fds[done], sizeof(fd))) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007828 err = -EFAULT;
7829 break;
7830 }
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01007831 if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
7832 err = -EINVAL;
7833 break;
7834 }
noah4e0377a2021-01-26 15:23:28 -05007835 if (fd == IORING_REGISTER_FILES_SKIP)
7836 continue;
7837
Pavel Begunkov67973b92021-01-26 13:51:09 +00007838 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007839 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007840
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007841 if (file_slot->file_ptr) {
7842 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkovb60c8dc2021-04-25 14:32:18 +01007843 err = io_queue_rsrc_removal(data, up->offset + done,
7844 ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08007845 if (err)
7846 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007847 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007848 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007849 }
7850 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007851 file = fget(fd);
7852 if (!file) {
7853 err = -EBADF;
7854 break;
7855 }
7856 /*
7857 * Don't allow io_uring instances to be registered. If
7858 * UNIX isn't enabled, then this causes a reference
7859 * cycle and this instance can never get freed. If UNIX
7860 * is enabled we'll handle it just fine, but there's
7861 * still no point in allowing a ring fd as it doesn't
7862 * support regular read/write anyway.
7863 */
7864 if (file->f_op == &io_uring_fops) {
7865 fput(file);
7866 err = -EBADF;
7867 break;
7868 }
Pavel Begunkov2d091d62021-06-14 02:36:21 +01007869 *io_get_tag_slot(data, up->offset + done) = tag;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01007870 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007871 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007872 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007873 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007874 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007875 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007876 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007877 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007878 }
7879
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007880 if (needs_switch)
7881 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06007882 return done ? done : err;
7883}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007884
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007885static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007886{
7887 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7888
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007889 req = io_put_req_find_next(req);
7890 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07007891}
7892
Jens Axboe685fe7f2021-03-08 09:37:51 -07007893static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7894 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007895{
Jens Axboee9418942021-02-19 12:33:30 -07007896 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007897 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007898 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007899
Jens Axboee9418942021-02-19 12:33:30 -07007900 hash = ctx->hash_map;
7901 if (!hash) {
7902 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7903 if (!hash)
7904 return ERR_PTR(-ENOMEM);
7905 refcount_set(&hash->refs, 1);
7906 init_waitqueue_head(&hash->wait);
7907 ctx->hash_map = hash;
7908 }
7909
7910 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07007911 data.task = task;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007912 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007913 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007914
Jens Axboed25e3a32021-02-16 11:41:41 -07007915	/* Use QD, or 4 * CPUS, whichever is smaller */
7916 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007917
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007918 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007919}
7920
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007921static int io_uring_alloc_task_context(struct task_struct *task,
7922 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007923{
7924 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007925 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007926
Pavel Begunkov09899b12021-06-14 02:36:22 +01007927 tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
Jens Axboe0f212202020-09-13 13:09:39 -06007928 if (unlikely(!tctx))
7929 return -ENOMEM;
7930
Jens Axboed8a6df12020-10-15 16:24:45 -06007931 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7932 if (unlikely(ret)) {
7933 kfree(tctx);
7934 return ret;
7935 }
7936
Jens Axboe685fe7f2021-03-08 09:37:51 -07007937 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007938 if (IS_ERR(tctx->io_wq)) {
7939 ret = PTR_ERR(tctx->io_wq);
7940 percpu_counter_destroy(&tctx->inflight);
7941 kfree(tctx);
7942 return ret;
7943 }
7944
Jens Axboe0f212202020-09-13 13:09:39 -06007945 xa_init(&tctx->xa);
7946 init_waitqueue_head(&tctx->wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06007947 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01007948 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007949 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00007950 spin_lock_init(&tctx->task_lock);
7951 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe7cbf1722021-02-10 00:03:20 +00007952 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06007953 return 0;
7954}
7955
7956void __io_uring_free(struct task_struct *tsk)
7957{
7958 struct io_uring_task *tctx = tsk->io_uring;
7959
7960 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007961 WARN_ON_ONCE(tctx->io_wq);
Pavel Begunkov09899b12021-06-14 02:36:22 +01007962 WARN_ON_ONCE(tctx->cached_refs);
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007963
Jens Axboed8a6df12020-10-15 16:24:45 -06007964 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007965 kfree(tctx);
7966 tsk->io_uring = NULL;
7967}
7968
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007969static int io_sq_offload_create(struct io_ring_ctx *ctx,
7970 struct io_uring_params *p)
Jens Axboe6b063142019-01-10 22:13:58 -07007971{
7972 int ret;
7973
Jens Axboed25e3a32021-02-16 11:41:41 -07007974	/* Retain compatibility: still fail for an invalid attach attempt */
7975 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7976 IORING_SETUP_ATTACH_WQ) {
7977 struct fd f;
7978
7979 f = fdget(p->wq_fd);
7980 if (!f.file)
7981 return -ENXIO;
Jens Axboed25e3a32021-02-16 11:41:41 -07007982 fdput(f);
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01007983 if (f.file->f_op != &io_uring_fops)
7984 return -EINVAL;
Jens Axboed25e3a32021-02-16 11:41:41 -07007985 }
Jens Axboe6b063142019-01-10 22:13:58 -07007986 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07007987 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007988 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007989 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007990
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007991 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007992 if (IS_ERR(sqd)) {
7993 ret = PTR_ERR(sqd);
7994 goto err;
7995 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007996
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007997 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06007998 ctx->sq_data = sqd;
Jens Axboe6b063142019-01-10 22:13:58 -07007999 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
8000 if (!ctx->sq_thread_idle)
8001 ctx->sq_thread_idle = HZ;
8002
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008003 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008004 list_add(&ctx->sqd_list, &sqd->ctx_list);
8005 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00008006 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008007 ret = (attached && !sqd->thread) ? -ENXIO : 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00008008 io_sq_thread_unpark(sqd);
8009
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00008010 if (ret < 0)
8011 goto err;
8012 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008013 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06008014
Jens Axboe6b063142019-01-10 22:13:58 -07008015 if (p->flags & IORING_SETUP_SQ_AFF) {
8016 int cpu = p->sq_thread_cpu;
8017
8018 ret = -EINVAL;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008019 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07008020 goto err_sqpoll;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008021 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008022 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008023 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008024 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008025
8026 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07008027 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07008028 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8029 if (IS_ERR(tsk)) {
8030 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07008031 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008032 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008033
Jens Axboe46fe18b2021-03-04 12:39:36 -07008034 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00008035 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07008036 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06008037 if (ret)
8038 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07008039 } else if (p->flags & IORING_SETUP_SQ_AFF) {
8040 /* Can't have SQ_AFF without SQPOLL */
8041 ret = -EINVAL;
8042 goto err;
8043 }
8044
Jens Axboe2b188cc2019-01-07 10:46:33 -07008045 return 0;
Pavel Begunkovf2a48dd2021-04-20 12:03:33 +01008046err_sqpoll:
8047 complete(&ctx->sq_data->exited);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008048err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008049 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008050 return ret;
8051}
8052
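/*
 * RLIMIT_MEMLOCK accounting: pinned pages are charged to and released from
 * user->locked_vm, with a cmpxchg loop so the limit check and the update
 * happen atomically.
 */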
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008053static inline void __io_unaccount_mem(struct user_struct *user,
8054 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008055{
8056 atomic_long_sub(nr_pages, &user->locked_vm);
8057}
8058
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008059static inline int __io_account_mem(struct user_struct *user,
8060 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008061{
8062 unsigned long page_limit, cur_pages, new_pages;
8063
8064 /* Don't allow more pages than we can safely lock */
8065 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8066
8067 do {
8068 cur_pages = atomic_long_read(&user->locked_vm);
8069 new_pages = cur_pages + nr_pages;
8070 if (new_pages > page_limit)
8071 return -ENOMEM;
8072 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8073 new_pages) != cur_pages);
8074
8075 return 0;
8076}
8077
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008078static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008079{
Jens Axboe62e398b2021-02-21 16:19:37 -07008080 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008081 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008082
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008083 if (ctx->mm_account)
8084 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008085}
8086
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008087static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008088{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008089 int ret;
8090
Jens Axboe62e398b2021-02-21 16:19:37 -07008091 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008092 ret = __io_account_mem(ctx->user, nr_pages);
8093 if (ret)
8094 return ret;
8095 }
8096
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008097 if (ctx->mm_account)
8098 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008099
8100 return 0;
8101}
8102
Jens Axboe2b188cc2019-01-07 10:46:33 -07008103static void io_mem_free(void *ptr)
8104{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008105 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008106
Mark Rutland52e04ef2019-04-30 17:30:21 +01008107 if (!ptr)
8108 return;
8109
8110 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008111 if (put_page_testzero(page))
8112 free_compound_page(page);
8113}
8114
8115static void *io_mem_alloc(size_t size)
8116{
8117 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008118 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008119
8120 return (void *) __get_free_pages(gfp_flags, get_order(size));
8121}
8122
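/*
 * Compute the size of the shared ring allocation: struct io_rings plus the
 * CQE array come first, followed (cache-line aligned) by the SQ index
 * array. *sq_offset reports where the SQ array starts.
 */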
Hristo Venev75b28af2019-08-26 17:23:46 +00008123static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8124 size_t *sq_offset)
8125{
8126 struct io_rings *rings;
8127 size_t off, sq_array_size;
8128
8129 off = struct_size(rings, cqes, cq_entries);
8130 if (off == SIZE_MAX)
8131 return SIZE_MAX;
8132
8133#ifdef CONFIG_SMP
8134 off = ALIGN(off, SMP_CACHE_BYTES);
8135 if (off == 0)
8136 return SIZE_MAX;
8137#endif
8138
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008139 if (sq_offset)
8140 *sq_offset = off;
8141
Hristo Venev75b28af2019-08-26 17:23:46 +00008142 sq_array_size = array_size(sizeof(u32), sq_entries);
8143 if (sq_array_size == SIZE_MAX)
8144 return SIZE_MAX;
8145
8146 if (check_add_overflow(off, sq_array_size, &off))
8147 return SIZE_MAX;
8148
Hristo Venev75b28af2019-08-26 17:23:46 +00008149 return off;
8150}
8151
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008152static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008153{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008154 struct io_mapped_ubuf *imu = *slot;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008155 unsigned int i;
8156
Pavel Begunkov62248432021-04-28 13:11:29 +01008157 if (imu != ctx->dummy_ubuf) {
8158 for (i = 0; i < imu->nr_bvecs; i++)
8159 unpin_user_page(imu->bvec[i].bv_page);
8160 if (imu->acct_pages)
8161 io_unaccount_mem(ctx, imu->acct_pages);
8162 kvfree(imu);
8163 }
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008164 *slot = NULL;
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008165}
8166
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008167static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
8168{
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008169 io_buffer_unmap(ctx, &prsrc->buf);
8170 prsrc->buf = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008171}
8172
8173static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008174{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008175 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008176
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008177 for (i = 0; i < ctx->nr_user_bufs; i++)
8178 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008179 kfree(ctx->user_bufs);
Zqiangbb6659c2021-04-30 16:25:15 +08008180 io_rsrc_data_free(ctx->buf_data);
Jens Axboeedafcce2019-01-09 09:16:05 -07008181 ctx->user_bufs = NULL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008182 ctx->buf_data = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008183 ctx->nr_user_bufs = 0;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008184}
8185
Jens Axboeedafcce2019-01-09 09:16:05 -07008186static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
8187{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008188 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008189
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008190 if (!ctx->buf_data)
Jens Axboeedafcce2019-01-09 09:16:05 -07008191 return -ENXIO;
8192
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008193 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8194 if (!ret)
8195 __io_sqe_buffers_unregister(ctx);
8196 return ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07008197}
8198
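/*
 * Copy one iovec from userspace, converting from the 32-bit layout if the
 * ring was created by a compat task.
 */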
8199static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8200 void __user *arg, unsigned index)
8201{
8202 struct iovec __user *src;
8203
8204#ifdef CONFIG_COMPAT
8205 if (ctx->compat) {
8206 struct compat_iovec __user *ciovs;
8207 struct compat_iovec ciov;
8208
8209 ciovs = (struct compat_iovec __user *) arg;
8210 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8211 return -EFAULT;
8212
Jens Axboed55e5f52019-12-11 16:12:15 -07008213 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008214 dst->iov_len = ciov.iov_len;
8215 return 0;
8216 }
8217#endif
8218 src = (struct iovec __user *) arg;
8219 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8220 return -EFAULT;
8221 return 0;
8222}
8223
Jens Axboede293932020-09-17 16:19:16 -06008224/*
8225 * Not super efficient, but this only happens at registration time. And we do cache
8226 * the last compound head, so generally we'll only do a full search if we don't
8227 * match that one.
8228 *
8229 * We check if the given compound head page has already been accounted, to
8230 * avoid double accounting it. This allows us to account the full size of the
8231 * page, not just the constituent pages of a huge page.
8232 */
8233static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8234 int nr_pages, struct page *hpage)
8235{
8236 int i, j;
8237
8238 /* check current page array */
8239 for (i = 0; i < nr_pages; i++) {
8240 if (!PageCompound(pages[i]))
8241 continue;
8242 if (compound_head(pages[i]) == hpage)
8243 return true;
8244 }
8245
8246 /* check previously registered pages */
8247 for (i = 0; i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008248 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
Jens Axboede293932020-09-17 16:19:16 -06008249
8250 for (j = 0; j < imu->nr_bvecs; j++) {
8251 if (!PageCompound(imu->bvec[j].bv_page))
8252 continue;
8253 if (compound_head(imu->bvec[j].bv_page) == hpage)
8254 return true;
8255 }
8256 }
8257
8258 return false;
8259}
8260
8261static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8262 int nr_pages, struct io_mapped_ubuf *imu,
8263 struct page **last_hpage)
8264{
8265 int i, ret;
8266
Pavel Begunkov216e5832021-05-29 12:01:02 +01008267 imu->acct_pages = 0;
Jens Axboede293932020-09-17 16:19:16 -06008268 for (i = 0; i < nr_pages; i++) {
8269 if (!PageCompound(pages[i])) {
8270 imu->acct_pages++;
8271 } else {
8272 struct page *hpage;
8273
8274 hpage = compound_head(pages[i]);
8275 if (hpage == *last_hpage)
8276 continue;
8277 *last_hpage = hpage;
8278 if (headpage_already_acct(ctx, pages, i, hpage))
8279 continue;
8280 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8281 }
8282 }
8283
8284 if (!imu->acct_pages)
8285 return 0;
8286
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008287 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008288 if (ret)
8289 imu->acct_pages = 0;
8290 return ret;
8291}
8292
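/*
 * Register a single fixed buffer: pin the userspace pages backing the iovec,
 * account them against RLIMIT_MEMLOCK, and build the bvec array used at IO
 * time. A NULL iov_base installs the dummy_ubuf sentinel to mark an empty
 * slot.
 */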
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008293static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008294 struct io_mapped_ubuf **pimu,
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008295 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008296{
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008297 struct io_mapped_ubuf *imu = NULL;
Jens Axboeedafcce2019-01-09 09:16:05 -07008298 struct vm_area_struct **vmas = NULL;
8299 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008300 unsigned long off, start, end, ubuf;
8301 size_t size;
8302 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008303
Pavel Begunkov62248432021-04-28 13:11:29 +01008304 if (!iov->iov_base) {
8305 *pimu = ctx->dummy_ubuf;
8306 return 0;
8307 }
8308
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008309 ubuf = (unsigned long) iov->iov_base;
8310 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8311 start = ubuf >> PAGE_SHIFT;
8312 nr_pages = end - start;
8313
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008314 *pimu = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008315 ret = -ENOMEM;
8316
8317 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8318 if (!pages)
8319 goto done;
8320
8321 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8322 GFP_KERNEL);
8323 if (!vmas)
8324 goto done;
8325
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008326 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
Pavel Begunkova2b41982021-04-26 00:16:31 +01008327 if (!imu)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008328 goto done;
8329
8330 ret = 0;
8331 mmap_read_lock(current->mm);
8332 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8333 pages, vmas);
8334 if (pret == nr_pages) {
8335 /* don't support file backed memory */
8336 for (i = 0; i < nr_pages; i++) {
8337 struct vm_area_struct *vma = vmas[i];
8338
Pavel Begunkov40dad762021-06-09 15:26:54 +01008339 if (vma_is_shmem(vma))
8340 continue;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008341 if (vma->vm_file &&
8342 !is_file_hugepages(vma->vm_file)) {
8343 ret = -EOPNOTSUPP;
8344 break;
8345 }
8346 }
8347 } else {
8348 ret = pret < 0 ? pret : -EFAULT;
8349 }
8350 mmap_read_unlock(current->mm);
8351 if (ret) {
8352 /*
8353 * if we did a partial map, or found file-backed vmas,
8354 * release any pages we did get
8355 */
8356 if (pret > 0)
8357 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008358 goto done;
8359 }
8360
8361 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8362 if (ret) {
8363 unpin_user_pages(pages, pret);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008364 goto done;
8365 }
8366
8367 off = ubuf & ~PAGE_MASK;
8368 size = iov->iov_len;
8369 for (i = 0; i < nr_pages; i++) {
8370 size_t vec_len;
8371
8372 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8373 imu->bvec[i].bv_page = pages[i];
8374 imu->bvec[i].bv_len = vec_len;
8375 imu->bvec[i].bv_offset = off;
8376 off = 0;
8377 size -= vec_len;
8378 }
8379 /* store original address for later verification */
8380 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01008381 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008382 imu->nr_bvecs = nr_pages;
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008383 *pimu = imu;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008384 ret = 0;
8385done:
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008386 if (ret)
8387 kvfree(imu);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008388 kvfree(pages);
8389 kvfree(vmas);
8390 return ret;
8391}
8392
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008393static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008394{
Pavel Begunkov87094462021-04-11 01:46:36 +01008395 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8396 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008397}
8398
8399static int io_buffer_validate(struct iovec *iov)
8400{
Pavel Begunkov50e96982021-03-24 22:59:01 +00008401 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8402
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008403 /*
8404 * Don't impose further limits on the size and buffer
8405 * constraints here; we'll -EINVAL later when IO is
8406 * submitted if they are wrong.
8407 */
Pavel Begunkov62248432021-04-28 13:11:29 +01008408 if (!iov->iov_base)
8409 return iov->iov_len ? -EFAULT : 0;
8410 if (!iov->iov_len)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008411 return -EFAULT;
8412
8413 /* arbitrary limit, but we need something */
8414 if (iov->iov_len > SZ_1G)
8415 return -EFAULT;
8416
Pavel Begunkov50e96982021-03-24 22:59:01 +00008417 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8418 return -EOVERFLOW;
8419
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008420 return 0;
8421}
8422
8423static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008424 unsigned int nr_args, u64 __user *tags)
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008425{
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008426 struct page *last_hpage = NULL;
8427 struct io_rsrc_data *data;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008428 int i, ret;
8429 struct iovec iov;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008430
Pavel Begunkov87094462021-04-11 01:46:36 +01008431 if (ctx->user_bufs)
8432 return -EBUSY;
Pavel Begunkov489809e2021-05-14 12:06:44 +01008433 if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
Pavel Begunkov87094462021-04-11 01:46:36 +01008434 return -EINVAL;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008435 ret = io_rsrc_node_switch_start(ctx);
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008436 if (ret)
8437 return ret;
Pavel Begunkovd878c812021-06-14 02:36:18 +01008438 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
8439 if (ret)
8440 return ret;
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008441 ret = io_buffers_map_alloc(ctx, nr_args);
8442 if (ret) {
Zqiangbb6659c2021-04-30 16:25:15 +08008443 io_rsrc_data_free(data);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008444 return ret;
8445 }
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008446
Pavel Begunkov87094462021-04-11 01:46:36 +01008447 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07008448 ret = io_copy_iov(ctx, &iov, arg, i);
8449 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008450 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008451 ret = io_buffer_validate(&iov);
8452 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008453 break;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008454 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008455 ret = -EINVAL;
8456 break;
8457 }
Jens Axboeedafcce2019-01-09 09:16:05 -07008458
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01008459 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
8460 &last_hpage);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008461 if (ret)
8462 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008463 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008464
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008465 WARN_ON_ONCE(ctx->buf_data);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008466
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008467 ctx->buf_data = data;
8468 if (ret)
8469 __io_sqe_buffers_unregister(ctx);
8470 else
8471 io_rsrc_node_switch(ctx, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07008472 return ret;
8473}
8474
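/*
 * Update registered buffers in place: validate and pin the replacement
 * iovecs, queue removal of any buffer currently in the slot, and switch
 * rsrc nodes if anything was replaced.
 */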
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008475static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
8476 struct io_uring_rsrc_update2 *up,
8477 unsigned int nr_args)
8478{
8479 u64 __user *tags = u64_to_user_ptr(up->tags);
8480 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008481 struct page *last_hpage = NULL;
8482 bool needs_switch = false;
8483 __u32 done;
8484 int i, err;
8485
8486 if (!ctx->buf_data)
8487 return -ENXIO;
8488 if (up->offset + nr_args > ctx->nr_user_bufs)
8489 return -EINVAL;
8490
8491 for (done = 0; done < nr_args; done++) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008492 struct io_mapped_ubuf *imu;
8493 int offset = up->offset + done;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008494 u64 tag = 0;
8495
8496 err = io_copy_iov(ctx, &iov, iovs, done);
8497 if (err)
8498 break;
8499 if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
8500 err = -EFAULT;
8501 break;
8502 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008503 err = io_buffer_validate(&iov);
8504 if (err)
8505 break;
Colin Ian Kingcf3770e2021-04-29 11:46:02 +01008506 if (!iov.iov_base && tag) {
8507 err = -EINVAL;
8508 break;
8509 }
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008510 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
8511 if (err)
8512 break;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008513
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008514 i = array_index_nospec(offset, ctx->nr_user_bufs);
Pavel Begunkov62248432021-04-28 13:11:29 +01008515 if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008516 err = io_queue_rsrc_removal(ctx->buf_data, offset,
8517 ctx->rsrc_node, ctx->user_bufs[i]);
8518 if (unlikely(err)) {
8519 io_buffer_unmap(ctx, &imu);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008520 break;
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008521 }
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008522 ctx->user_bufs[i] = NULL;
8523 needs_switch = true;
8524 }
8525
Pavel Begunkov0b8c0e72021-04-26 15:17:38 +01008526 ctx->user_bufs[i] = imu;
Pavel Begunkov2d091d62021-06-14 02:36:21 +01008527 *io_get_tag_slot(ctx->buf_data, offset) = tag;
Pavel Begunkov634d00d2021-04-25 14:32:26 +01008528 }
8529
8530 if (needs_switch)
8531 io_rsrc_node_switch(ctx, ctx->buf_data);
8532 return done ? done : err;
8533}
8534
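/* Register an eventfd that is signalled whenever completions are posted. */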
Jens Axboe9b402842019-04-11 11:45:41 -06008535static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8536{
8537 __s32 __user *fds = arg;
8538 int fd;
8539
8540 if (ctx->cq_ev_fd)
8541 return -EBUSY;
8542
8543 if (copy_from_user(&fd, fds, sizeof(*fds)))
8544 return -EFAULT;
8545
8546 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8547 if (IS_ERR(ctx->cq_ev_fd)) {
8548 int ret = PTR_ERR(ctx->cq_ev_fd);
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01008549
Jens Axboe9b402842019-04-11 11:45:41 -06008550 ctx->cq_ev_fd = NULL;
8551 return ret;
8552 }
8553
8554 return 0;
8555}
8556
8557static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8558{
8559 if (ctx->cq_ev_fd) {
8560 eventfd_ctx_put(ctx->cq_ev_fd);
8561 ctx->cq_ev_fd = NULL;
8562 return 0;
8563 }
8564
8565 return -ENXIO;
8566}
8567
Jens Axboe5a2e7452020-02-23 16:23:11 -07008568static void io_destroy_buffers(struct io_ring_ctx *ctx)
8569{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008570 struct io_buffer *buf;
8571 unsigned long index;
8572
8573 xa_for_each(&ctx->io_buffers, index, buf)
8574 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008575}
8576
Jens Axboe68e68ee2021-02-13 09:00:02 -07008577static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008578{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008579 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008580
Jens Axboe68e68ee2021-02-13 09:00:02 -07008581 list_for_each_entry_safe(req, nxt, list, compl.list) {
8582 if (tsk && req->task != tsk)
8583 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008584 list_del(&req->compl.list);
8585 kmem_cache_free(req_cachep, req);
8586 }
8587}
8588
Jens Axboe4010fec2021-02-27 15:04:18 -07008589static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008590{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008591 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008592 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008593
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008594 mutex_lock(&ctx->uring_lock);
8595
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008596 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008597 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8598 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008599 submit_state->free_reqs = 0;
8600 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008601
Pavel Begunkovdac7a092021-03-19 17:22:39 +00008602 io_flush_cached_locked_reqs(ctx, cs);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008603 io_req_cache_free(&cs->free_list, NULL);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008604 mutex_unlock(&ctx->uring_lock);
8605}
8606
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008607static bool io_wait_rsrc_data(struct io_rsrc_data *data)
8608{
8609 if (!data)
8610 return false;
8611 if (!atomic_dec_and_test(&data->refs))
8612 wait_for_completion(&data->done);
8613 return true;
8614}
8615
Jens Axboe2b188cc2019-01-07 10:46:33 -07008616static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8617{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008618 io_sq_thread_finish(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008619
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008620 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008621 mmdrop(ctx->mm_account);
8622 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008623 }
Jens Axboedef596e2019-01-09 08:59:42 -07008624
Hao Xu8bad28d2021-02-19 17:19:36 +08008625 mutex_lock(&ctx->uring_lock);
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +01008626 if (io_wait_rsrc_data(ctx->buf_data))
8627 __io_sqe_buffers_unregister(ctx);
8628 if (io_wait_rsrc_data(ctx->file_data))
Pavel Begunkov08480402021-04-13 02:58:38 +01008629 __io_sqe_files_unregister(ctx);
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01008630 if (ctx->rings)
8631 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08008632 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008633 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008634 io_destroy_buffers(ctx);
Pavel Begunkov07db2982021-04-20 12:03:32 +01008635 if (ctx->sq_creds)
8636 put_cred(ctx->sq_creds);
Jens Axboedef596e2019-01-09 08:59:42 -07008637
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008638 /* there are no registered resources left, nobody uses it */
8639 if (ctx->rsrc_node)
8640 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008641 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008642 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008643 flush_delayed_work(&ctx->rsrc_put_work);
8644
8645 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8646 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Jens Axboe2b188cc2019-01-07 10:46:33 -07008647
8648#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008649 if (ctx->ring_sock) {
8650 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008651 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008652 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008653#endif
8654
Hristo Venev75b28af2019-08-26 17:23:46 +00008655 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008656 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008657
8658 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008659 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008660 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008661 if (ctx->hash_map)
8662 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008663 kfree(ctx->cancel_hash);
Pavel Begunkov62248432021-04-28 13:11:29 +01008664 kfree(ctx->dummy_ubuf);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008665 kfree(ctx);
8666}
8667
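/*
 * poll() on the ring fd: report EPOLLOUT while the SQ ring has room, and
 * EPOLLIN once completions are available or the overflow list is non-empty.
 */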
8668static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8669{
8670 struct io_ring_ctx *ctx = file->private_data;
8671 __poll_t mask = 0;
8672
Pavel Begunkov311997b2021-06-14 23:37:28 +01008673 poll_wait(file, &ctx->poll_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008674 /*
8675 * synchronizes with barrier from wq_has_sleeper call in
8676 * io_commit_cqring
8677 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008678 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008679 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008680 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008681
8682 /*
8683 * Don't flush the cqring overflow list here; just do a simple check.
8684 * Otherwise there could possibly be an ABBA deadlock:
8685 * CPU0 CPU1
8686 * ---- ----
8687 * lock(&ctx->uring_lock);
8688 * lock(&ep->mtx);
8689 * lock(&ctx->uring_lock);
8690 * lock(&ep->mtx);
8691 *
8692 * Users may get EPOLLIN while seeing nothing in the cqring, which
8693 * pushes them to do the flush.
8694 */
Pavel Begunkov5ed7a372021-06-14 23:37:27 +01008695 if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008696 mask |= EPOLLIN | EPOLLRDNORM;
8697
8698 return mask;
8699}
8700
8701static int io_uring_fasync(int fd, struct file *file, int on)
8702{
8703 struct io_ring_ctx *ctx = file->private_data;
8704
8705 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8706}
8707
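/* Drop credentials previously registered as a personality. */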
Yejune Deng0bead8c2020-12-24 11:02:20 +08008708static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008709{
Jens Axboe4379bf82021-02-15 13:40:22 -07008710 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008711
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008712 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008713 if (creds) {
8714 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008715 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008716 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008717
8718 return -EINVAL;
8719}
8720
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008721struct io_tctx_exit {
8722 struct callback_head task_work;
8723 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008724 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008725};
8726
8727static void io_tctx_exit_cb(struct callback_head *cb)
8728{
8729 struct io_uring_task *tctx = current->io_uring;
8730 struct io_tctx_exit *work;
8731
8732 work = container_of(cb, struct io_tctx_exit, task_work);
8733 /*
8734 * When @in_idle, we're in cancellation and it's racy to remove the
8735 * node. It'll be removed by the end of cancellation; just ignore it.
8736 */
8737 if (!atomic_read(&tctx->in_idle))
Pavel Begunkoveef51da2021-06-14 02:36:15 +01008738 io_uring_del_tctx_node((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008739 complete(&work->completion);
8740}
8741
Pavel Begunkov28090c12021-04-25 23:34:45 +01008742static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8743{
8744 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8745
8746 return req->ctx == data;
8747}
8748
Jens Axboe85faa7b2020-04-09 18:14:00 -06008749static void io_ring_exit_work(struct work_struct *work)
8750{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008751 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008752 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008753 struct io_tctx_exit exit;
8754 struct io_tctx_node *node;
8755 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008756
Jens Axboe56952e92020-06-17 15:00:04 -06008757 /*
8758 * If we're doing polled IO and end up having requests being
8759 * submitted async (out-of-line), then completions can come in while
8760 * we're waiting for refs to drop. We need to reap these manually,
8761 * as nobody else will be looking for them.
8762 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008763 do {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008764 io_uring_try_cancel_requests(ctx, NULL, true);
Pavel Begunkov28090c12021-04-25 23:34:45 +01008765 if (ctx->sq_data) {
8766 struct io_sq_data *sqd = ctx->sq_data;
8767 struct task_struct *tsk;
8768
8769 io_sq_thread_park(sqd);
8770 tsk = sqd->thread;
8771 if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
8772 io_wq_cancel_cb(tsk->io_uring->io_wq,
8773 io_cancel_ctx_cb, ctx, true);
8774 io_sq_thread_unpark(sqd);
8775 }
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008776
8777 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008778 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008779
Pavel Begunkov7f006512021-04-14 13:38:34 +01008780 init_completion(&exit.completion);
8781 init_task_work(&exit.task_work, io_tctx_exit_cb);
8782 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01008783 /*
8784 * Some tasks may use the context even when all refs and requests have been put,
8785 * and they are free to do so while still holding uring_lock or
Pavel Begunkov5b0a6ac2021-06-30 21:54:04 +01008786 * completion_lock, see io_req_task_submit(). Apart from other work,
Pavel Begunkov89b50662021-04-01 15:43:50 +01008787 * this lock/unlock section also waits for them to finish.
8788 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008789 mutex_lock(&ctx->uring_lock);
8790 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008791 WARN_ON_ONCE(time_after(jiffies, timeout));
8792
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008793 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8794 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01008795 /* don't spin on a single task if cancellation failed */
8796 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008797 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8798 if (WARN_ON_ONCE(ret))
8799 continue;
8800 wake_up_process(node->task);
8801
8802 mutex_unlock(&ctx->uring_lock);
8803 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008804 mutex_lock(&ctx->uring_lock);
8805 }
8806 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov89b50662021-04-01 15:43:50 +01008807 spin_lock_irq(&ctx->completion_lock);
8808 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008809
Jens Axboe85faa7b2020-04-09 18:14:00 -06008810 io_ring_ctx_free(ctx);
8811}
8812
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008813/* Returns true if we found and killed one or more timeouts */
8814static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008815 bool cancel_all)
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008816{
8817 struct io_kiocb *req, *tmp;
8818 int canceled = 0;
8819
8820 spin_lock_irq(&ctx->completion_lock);
8821 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008822 if (io_match_task(req, tsk, cancel_all)) {
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008823 io_kill_timeout(req, -ECANCELED);
8824 canceled++;
8825 }
8826 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008827 if (canceled != 0)
8828 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008829 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008830 if (canceled != 0)
8831 io_cqring_ev_posted(ctx);
8832 return canceled != 0;
8833}
8834
Jens Axboe2b188cc2019-01-07 10:46:33 -07008835static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8836{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008837 unsigned long index;
8838 struct creds *creds;
8839
Jens Axboe2b188cc2019-01-07 10:46:33 -07008840 mutex_lock(&ctx->uring_lock);
8841 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00008842 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00008843 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008844 xa_for_each(&ctx->personalities, index, creds)
8845 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008846 mutex_unlock(&ctx->uring_lock);
8847
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008848 io_kill_timeouts(ctx, NULL, true);
8849 io_poll_remove_all(ctx, NULL, true);
Jens Axboe561fb042019-10-24 07:25:42 -06008850
Jens Axboe15dff282019-11-13 09:09:23 -07008851 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008852 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008853
Jens Axboe85faa7b2020-04-09 18:14:00 -06008854 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008855 /*
8856 * Use system_unbound_wq to avoid spawning tons of event kworkers
8857 * if we're exiting a ton of rings at the same time. It just adds
8858 * noise and overhead; there's no discernible change in runtime
8859 * over using system_wq.
8860 */
8861 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008862}
8863
8864static int io_uring_release(struct inode *inode, struct file *file)
8865{
8866 struct io_ring_ctx *ctx = file->private_data;
8867
8868 file->private_data = NULL;
8869 io_ring_ctx_wait_and_kill(ctx);
8870 return 0;
8871}
8872
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008873struct io_task_cancel {
8874 struct task_struct *task;
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008875 bool all;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008876};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008877
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008878static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008879{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008880 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008881 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008882 bool ret;
8883
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008884 if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008885 unsigned long flags;
8886 struct io_ring_ctx *ctx = req->ctx;
8887
8888 /* protect against races with linked timeouts */
8889 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008890 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008891 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8892 } else {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008893 ret = io_match_task(req, cancel->task, cancel->all);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008894 }
8895 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008896}
8897
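/*
 * Cancel deferred (drained) requests matching @task, completing them with
 * -ECANCELED. Returns true if any were cancelled.
 */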
Pavel Begunkove1915f72021-03-11 23:29:35 +00008898static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008899 struct task_struct *task, bool cancel_all)
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008900{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008901 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008902 LIST_HEAD(list);
8903
8904 spin_lock_irq(&ctx->completion_lock);
8905 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008906 if (io_match_task(de->req, task, cancel_all)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008907 list_cut_position(&list, &ctx->defer_list, &de->list);
8908 break;
8909 }
8910 }
8911 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008912 if (list_empty(&list))
8913 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008914
8915 while (!list_empty(&list)) {
8916 de = list_first_entry(&list, struct io_defer_entry, list);
8917 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008918 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008919 kfree(de);
8920 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008921 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008922}
8923
Pavel Begunkov1b007642021-03-06 11:02:17 +00008924static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8925{
8926 struct io_tctx_node *node;
8927 enum io_wq_cancel cret;
8928 bool ret = false;
8929
8930 mutex_lock(&ctx->uring_lock);
8931 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8932 struct io_uring_task *tctx = node->task->io_uring;
8933
8934 /*
8935 * io_wq will stay alive while we hold uring_lock, because it's
8936 * killed after ctx nodes, which requires to take the lock.
8937 */
8938 if (!tctx || !tctx->io_wq)
8939 continue;
8940 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8941 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8942 }
8943 mutex_unlock(&ctx->uring_lock);
8944
8945 return ret;
8946}
8947
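/*
 * Cancel requests matching @task (or all requests if @task is NULL) across
 * io-wq, the iopoll list, deferred requests, poll and timeout lists,
 * looping until nothing more can be cancelled.
 */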
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008948static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8949 struct task_struct *task,
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008950 bool cancel_all)
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008951{
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008952 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008953 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008954
8955 while (1) {
8956 enum io_wq_cancel cret;
8957 bool ret = false;
8958
Pavel Begunkov1b007642021-03-06 11:02:17 +00008959 if (!task) {
8960 ret |= io_uring_try_cancel_iowq(ctx);
8961 } else if (tctx && tctx->io_wq) {
8962 /*
8963 * Cancels requests of all rings, not only @ctx, but
8964 * it's fine as the task is in exit/exec.
8965 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008966 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008967 &cancel, true);
8968 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8969 }
8970
8971 /* SQPOLL thread does its own polling */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008972 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
Jens Axboed052d1d2021-03-11 10:49:20 -07008973 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008974 while (!list_empty_careful(&ctx->iopoll_list)) {
8975 io_iopoll_try_reap_events(ctx);
8976 ret = true;
8977 }
8978 }
8979
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01008980 ret |= io_cancel_defer_files(ctx, task, cancel_all);
8981 ret |= io_poll_remove_all(ctx, task, cancel_all);
8982 ret |= io_kill_timeouts(ctx, task, cancel_all);
Pavel Begunkove5dc4802021-06-26 21:40:46 +01008983 if (task)
8984 ret |= io_run_task_work();
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008985 if (!ret)
8986 break;
8987 cond_resched();
8988 }
8989}
8990
Pavel Begunkoveef51da2021-06-14 02:36:15 +01008991static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008992{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008993 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008994 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00008995 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008996
8997 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008998 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008999 if (unlikely(ret))
9000 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01009001 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06009002 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009003 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
9004 node = kmalloc(sizeof(*node), GFP_KERNEL);
9005 if (!node)
9006 return -ENOMEM;
9007 node->ctx = ctx;
9008 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06009009
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009010 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9011 node, GFP_KERNEL));
9012 if (ret) {
9013 kfree(node);
9014 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06009015 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009016
9017 mutex_lock(&ctx->uring_lock);
9018 list_add(&node->ctx_node, &ctx->tctx_list);
9019 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009020 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009021 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06009022 return 0;
9023}
9024
9025/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009026 * Note that this task has used io_uring. We use it for cancelation purposes.
9027 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009028static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009029{
9030 struct io_uring_task *tctx = current->io_uring;
9031
9032 if (likely(tctx && tctx->last == ctx))
9033 return 0;
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009034 return __io_uring_add_tctx_node(ctx);
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00009035}
9036
9037/*
Jens Axboe0f212202020-09-13 13:09:39 -06009038 * Remove this io_uring_file -> task mapping.
9039 */
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009040static void io_uring_del_tctx_node(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06009041{
9042 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009043 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00009044
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00009045 if (!tctx)
9046 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009047 node = xa_erase(&tctx->xa, index);
9048 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00009049 return;
Jens Axboe0f212202020-09-13 13:09:39 -06009050
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009051 WARN_ON_ONCE(current != node->task);
9052 WARN_ON_ONCE(list_empty(&node->ctx_node));
9053
9054 mutex_lock(&node->ctx->uring_lock);
9055 list_del(&node->ctx_node);
9056 mutex_unlock(&node->ctx->uring_lock);
9057
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009058 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06009059 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009060 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06009061}
9062
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009063static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009064{
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009065 struct io_wq *wq = tctx->io_wq;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009066 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009067 unsigned long index;
9068
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00009069 xa_for_each(&tctx->xa, index, node)
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009070 io_uring_del_tctx_node(index);
Marco Elverb16ef422021-05-27 11:25:48 +02009071 if (wq) {
9072 /*
9073 * Must be after io_uring_del_tctx_node() (removes nodes under
9074 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
9075 */
9076 tctx->io_wq = NULL;
Pavel Begunkovba5ef6d2021-05-20 13:21:20 +01009077 io_wq_put_and_exit(wq);
Marco Elverb16ef422021-05-27 11:25:48 +02009078 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009079}
9080
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009081static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009082{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009083 if (tracked)
9084 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009085 return percpu_counter_sum(&tctx->inflight);
9086}
9087
Pavel Begunkov09899b12021-06-14 02:36:22 +01009088static void io_uring_drop_tctx_refs(struct task_struct *task)
9089{
9090 struct io_uring_task *tctx = task->io_uring;
9091 unsigned int refs = tctx->cached_refs;
9092
9093 tctx->cached_refs = 0;
9094 percpu_counter_sub(&tctx->inflight, refs);
9095 put_task_struct_many(task, refs);
9096}
9097
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009098/*
9099 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9100 * requests. @sqd should be non-NULL iff it's an SQPOLL thread cancellation.
9101 */
9102static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009103{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00009104 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov734551d2021-04-18 14:52:09 +01009105 struct io_ring_ctx *ctx;
Jens Axboefdaf0832020-10-30 09:37:30 -06009106 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00009107 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06009108
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009109 WARN_ON_ONCE(sqd && sqd->thread != current);
9110
Palash Oswal6d042ff2021-04-27 18:21:49 +05309111 if (!current->io_uring)
9112 return;
Pavel Begunkov17a91052021-05-23 15:48:39 +01009113 if (tctx->io_wq)
9114 io_wq_exit_start(tctx->io_wq);
9115
Pavel Begunkov09899b12021-06-14 02:36:22 +01009116 io_uring_drop_tctx_refs(current);
Jens Axboefdaf0832020-10-30 09:37:30 -06009117 atomic_inc(&tctx->in_idle);
Jens Axboed8a6df12020-10-15 16:24:45 -06009118 do {
Jens Axboe0f212202020-09-13 13:09:39 -06009119 /* read completions before cancelations */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009120 inflight = tctx_inflight(tctx, !cancel_all);
Jens Axboed8a6df12020-10-15 16:24:45 -06009121 if (!inflight)
9122 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009123
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009124 if (!sqd) {
9125 struct io_tctx_node *node;
9126 unsigned long index;
9127
9128 xa_for_each(&tctx->xa, index, node) {
9129 /* sqpoll task will cancel all its requests */
9130 if (node->ctx->sq_data)
9131 continue;
9132 io_uring_try_cancel_requests(node->ctx, current,
9133 cancel_all);
9134 }
9135 } else {
9136 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
9137 io_uring_try_cancel_requests(ctx, current,
9138 cancel_all);
9139 }
9140
9141 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
Jens Axboe0f212202020-09-13 13:09:39 -06009142 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009143 * If we've seen completions, retry without waiting. This
9144 * avoids a race where a completion comes in before we did
9145 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009146 */
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009147 if (inflight == tctx_inflight(tctx, !cancel_all))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009148 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009149 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009150 } while (1);
Jens Axboefdaf0832020-10-30 09:37:30 -06009151 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009152
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009153 io_uring_clean_tctx(tctx);
Pavel Begunkov3dd0c972021-05-16 22:58:04 +01009154 if (cancel_all) {
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01009155 /* for exec all current's requests should be gone, kill tctx */
9156 __io_uring_free(current);
9157 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009158}
9159
Pavel Begunkov78cc6872021-06-14 02:36:23 +01009160void __io_uring_cancel(struct files_struct *files)
9161{
9162 io_uring_cancel_generic(!files, NULL);
9163}
9164
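/*
 * Translate an mmap() offset (IORING_OFF_SQ_RING, IORING_OFF_CQ_RING or
 * IORING_OFF_SQES) into the backing kernel address and verify that the
 * requested size fits within that allocation.
 */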
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009165static void *io_uring_validate_mmap_request(struct file *file,
9166 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009167{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009168 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009169 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009170 struct page *page;
9171 void *ptr;
9172
9173 switch (offset) {
9174 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009175 case IORING_OFF_CQ_RING:
9176 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009177 break;
9178 case IORING_OFF_SQES:
9179 ptr = ctx->sq_sqes;
9180 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009181 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009182 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009183 }
9184
9185 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009186 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009187 return ERR_PTR(-EINVAL);
9188
9189 return ptr;
9190}
9191
9192#ifdef CONFIG_MMU
9193
9194static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9195{
9196 size_t sz = vma->vm_end - vma->vm_start;
9197 unsigned long pfn;
9198 void *ptr;
9199
9200 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9201 if (IS_ERR(ptr))
9202 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009203
9204 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9205 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9206}
9207
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009208#else /* !CONFIG_MMU */
9209
9210static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9211{
9212 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9213}
9214
9215static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9216{
9217 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9218}
9219
9220static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9221 unsigned long addr, unsigned long len,
9222 unsigned long pgoff, unsigned long flags)
9223{
9224 void *ptr;
9225
9226 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9227 if (IS_ERR(ptr))
9228 return PTR_ERR(ptr);
9229
9230 return (unsigned long) ptr;
9231}
9232
9233#endif /* !CONFIG_MMU */
9234
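/*
 * Back IORING_ENTER_SQ_WAIT: sleep until the SQ ring has free space or a
 * signal is pending.
 */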
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009235static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009236{
9237 DEFINE_WAIT(wait);
9238
9239 do {
9240 if (!io_sqring_full(ctx))
9241 break;
Jens Axboe90554202020-09-03 12:12:41 -06009242 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9243
9244 if (!io_sqring_full(ctx))
9245 break;
Jens Axboe90554202020-09-03 12:12:41 -06009246 schedule();
9247 } while (!signal_pending(current));
9248
9249 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009250 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009251}
9252
Hao Xuc73ebb62020-11-03 10:54:37 +08009253static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9254 struct __kernel_timespec __user **ts,
9255 const sigset_t __user **sig)
9256{
9257 struct io_uring_getevents_arg arg;
9258
9259 /*
9260 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9261 * is just a pointer to the sigset_t.
9262 */
9263 if (!(flags & IORING_ENTER_EXT_ARG)) {
9264 *sig = (const sigset_t __user *) argp;
9265 *ts = NULL;
9266 return 0;
9267 }
9268
9269 /*
9270 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9271 * timespec and sigset_t pointers if good.
9272 */
9273 if (*argsz != sizeof(arg))
9274 return -EINVAL;
9275 if (copy_from_user(&arg, argp, sizeof(arg)))
9276 return -EFAULT;
9277 *sig = u64_to_user_ptr(arg.sigmask);
9278 *argsz = arg.sigmask_sz;
9279 *ts = u64_to_user_ptr(arg.ts);
9280 return 0;
9281}
9282
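/*
 * io_uring_enter(2): submit SQ entries and/or wait for completions. With
 * IORING_SETUP_SQPOLL the SQ thread does the actual submission, so this
 * path only wakes it or waits for SQ space, as requested by @flags.
 */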
Jens Axboe2b188cc2019-01-07 10:46:33 -07009283SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009284 u32, min_complete, u32, flags, const void __user *, argp,
9285 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009286{
9287 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009288 int submitted = 0;
9289 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009290 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009291
Jens Axboe4c6e2772020-07-01 11:29:10 -06009292 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009293
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009294 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9295 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009296 return -EINVAL;
9297
9298 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009299 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009300 return -EBADF;
9301
9302 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009303 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009304 goto out_fput;
9305
9306 ret = -ENXIO;
9307 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009308 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009309 goto out_fput;
9310
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009311 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009312 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009313 goto out;
9314
Jens Axboe6c271ce2019-01-10 11:22:30 -07009315 /*
9316 * For SQ polling, the thread will do all submissions and completions.
9317 * Just return the requested submit count, and wake the thread if
9318 * we were asked to.
9319 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009320 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009321 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009322 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009323
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009324 ret = -EOWNERDEAD;
Pavel Begunkovfe7e3252021-06-24 15:09:57 +01009325 if (unlikely(ctx->sq_data->thread == NULL))
Stefan Metzmacher04147482021-03-07 11:54:29 +01009326 goto out;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009327 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009328 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009329 if (flags & IORING_ENTER_SQ_WAIT) {
9330 ret = io_sqpoll_wait_sq(ctx);
9331 if (ret)
9332 goto out;
9333 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009334 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009335 } else if (to_submit) {
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009336 ret = io_uring_add_tctx_node(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009337 if (unlikely(ret))
9338 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009339 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009340 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009341 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009342
9343 if (submitted != to_submit)
9344 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009345 }
9346 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009347 const sigset_t __user *sig;
9348 struct __kernel_timespec __user *ts;
9349
9350 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9351 if (unlikely(ret))
9352 goto out;
9353
Jens Axboe2b188cc2019-01-07 10:46:33 -07009354 min_complete = min(min_complete, ctx->cq_entries);
9355
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009356 /*
9357 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9358 * space applications don't need to poll for completion events
9359 * themselves; they can rely on io_sq_thread to do that polling,
9360 * which reduces cpu usage and uring_lock contention.
9361 */
9362 if (ctx->flags & IORING_SETUP_IOPOLL &&
9363 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009364 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009365 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009366 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009367 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009368 }
9369
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009370out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009371 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009372out_fput:
9373 fdput(f);
9374 return submitted ? submitted : ret;
9375}
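
/*
 * For illustration (sketch): the common non-SQPOLL pattern is a single call
 * that both submits pending SQEs and waits for at least one completion:
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * On success ret is the number of SQEs consumed, matching the
 * "submitted ? submitted : ret" return above.
 */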
9376
Tobias Klauserbebdb652020-02-26 18:38:32 +01009377#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009378static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9379 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009380{
Jens Axboe87ce9552020-01-30 08:25:34 -07009381 struct user_namespace *uns = seq_user_ns(m);
9382 struct group_info *gi;
9383 kernel_cap_t cap;
9384 unsigned __capi;
9385 int g;
9386
9387 seq_printf(m, "%5d\n", id);
9388 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9389 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9390 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9391 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9392 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9393 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9394 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9395 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9396 seq_puts(m, "\n\tGroups:\t");
9397 gi = cred->group_info;
9398 for (g = 0; g < gi->ngroups; g++) {
9399 seq_put_decimal_ull(m, g ? " " : "",
9400 from_kgid_munged(uns, gi->gid[g]));
9401 }
9402 seq_puts(m, "\n\tCapEff:\t");
9403 cap = cred->cap_effective;
9404 CAP_FOR_EACH_U32(__capi)
9405 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9406 seq_putc(m, '\n');
9407 return 0;
9408}
9409
9410static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9411{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009412 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009413 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009414 int i;
9415
Jens Axboefad8e0d2020-09-28 08:57:48 -06009416 /*
9417 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9418 * since the fdinfo case grabs it in the opposite direction of normal use
9419 * cases. If we fail to get the lock, we just don't iterate any
9420 * structures that could be going away outside the io_uring mutex.
9421 */
9422 has_lock = mutex_trylock(&ctx->uring_lock);
9423
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009424 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009425 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009426 if (!sq->thread)
9427 sq = NULL;
9428 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009429
9430 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9431 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009432 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009433 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07009434 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009435
Jens Axboe87ce9552020-01-30 08:25:34 -07009436 if (f)
9437 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9438 else
9439 seq_printf(m, "%5u: <none>\n", i);
9440 }
9441 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009442 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Pavel Begunkov41edf1a2021-04-25 14:32:23 +01009443 struct io_mapped_ubuf *buf = ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +01009444 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -07009445
Pavel Begunkov4751f532021-04-01 15:43:55 +01009446 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -07009447 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009448 if (has_lock && !xa_empty(&ctx->personalities)) {
9449 unsigned long index;
9450 const struct cred *cred;
9451
Jens Axboe87ce9552020-01-30 08:25:34 -07009452 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009453 xa_for_each(&ctx->personalities, index, cred)
9454 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009455 }
Jens Axboed7718a92020-02-14 22:23:12 -07009456 seq_printf(m, "PollList:\n");
9457 spin_lock_irq(&ctx->completion_lock);
9458 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9459 struct hlist_head *list = &ctx->cancel_hash[i];
9460 struct io_kiocb *req;
9461
9462 hlist_for_each_entry(req, list, hash_node)
9463 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9464 req->task->task_works != NULL);
9465 }
9466 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009467 if (has_lock)
9468 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009469}
9470
9471static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9472{
9473 struct io_ring_ctx *ctx = f->private_data;
9474
9475 if (percpu_ref_tryget(&ctx->refs)) {
9476 __io_uring_show_fdinfo(ctx, m);
9477 percpu_ref_put(&ctx->refs);
9478 }
9479}
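
/*
 * For illustration: the seq_printf() calls above surface under
 * /proc/<pid>/fdinfo/<ring fd>, roughly shaped like this (values made up):
 *
 *	SqThread:	1234
 *	SqThreadCpu:	2
 *	UserFiles:	2
 *	    0: foo.txt
 *	    1: <none>
 *	UserBufs:	1
 *	    0: 0x7f2a00000000/4096
 *	PollList:
 *	 op=6, task_works=0
 */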
Tobias Klauserbebdb652020-02-26 18:38:32 +01009480#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009481
Jens Axboe2b188cc2019-01-07 10:46:33 -07009482static const struct file_operations io_uring_fops = {
9483 .release = io_uring_release,
9484 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009485#ifndef CONFIG_MMU
9486 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9487 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9488#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009489 .poll = io_uring_poll,
9490 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009491#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009492 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009493#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009494};
9495
9496static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9497 struct io_uring_params *p)
9498{
Hristo Venev75b28af2019-08-26 17:23:46 +00009499 struct io_rings *rings;
9500 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009501
Jens Axboebd740482020-08-05 12:58:23 -06009502 /* make sure these are sane, as we already accounted them */
9503 ctx->sq_entries = p->sq_entries;
9504 ctx->cq_entries = p->cq_entries;
9505
Hristo Venev75b28af2019-08-26 17:23:46 +00009506 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9507 if (size == SIZE_MAX)
9508 return -EOVERFLOW;
9509
9510 rings = io_mem_alloc(size);
9511 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009512 return -ENOMEM;
9513
Hristo Venev75b28af2019-08-26 17:23:46 +00009514 ctx->rings = rings;
9515 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9516 rings->sq_ring_mask = p->sq_entries - 1;
9517 rings->cq_ring_mask = p->cq_entries - 1;
9518 rings->sq_ring_entries = p->sq_entries;
9519 rings->cq_ring_entries = p->cq_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009520
9521 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009522 if (size == SIZE_MAX) {
9523 io_mem_free(ctx->rings);
9524 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009525 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009526 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009527
9528 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009529 if (!ctx->sq_sqes) {
9530 io_mem_free(ctx->rings);
9531 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009532 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009533 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009534
Jens Axboe2b188cc2019-01-07 10:46:33 -07009535 return 0;
9536}
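
/*
 * Note on layout: the single "rings" allocation above backs both the
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING mappings (struct io_rings with
 * the SQ index array appended at sq_array_offset), while ctx->sq_sqes backs
 * the separate IORING_OFF_SQES mapping. Both entry counts are powers of two,
 * so the ring masks stored above can be applied with a plain AND.
 */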
9537
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009538static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9539{
9540 int ret, fd;
9541
9542 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9543 if (fd < 0)
9544 return fd;
9545
Pavel Begunkoveef51da2021-06-14 02:36:15 +01009546 ret = io_uring_add_tctx_node(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009547 if (ret) {
9548 put_unused_fd(fd);
9549 return ret;
9550 }
9551 fd_install(fd, file);
9552 return fd;
9553}
9554
Jens Axboe2b188cc2019-01-07 10:46:33 -07009555/*
9556 * Allocate an anonymous fd; this is what constitutes the application
9557 * visible backing of an io_uring instance. The application mmaps this
9558 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9559 * we have to tie this fd to a socket for file garbage collection purposes.
9560 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009561static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009562{
9563 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009564#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009565 int ret;
9566
Jens Axboe2b188cc2019-01-07 10:46:33 -07009567 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9568 &ctx->ring_sock);
9569 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009570 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009571#endif
9572
Jens Axboe2b188cc2019-01-07 10:46:33 -07009573 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9574 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009575#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009576 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009577 sock_release(ctx->ring_sock);
9578 ctx->ring_sock = NULL;
9579 } else {
9580 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009581 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009582#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009583 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009584}
9585
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009586static int io_uring_create(unsigned entries, struct io_uring_params *p,
9587 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009588{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009589 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009590 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009591 int ret;
9592
Jens Axboe8110c1a2019-12-28 15:39:54 -07009593 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009594 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009595 if (entries > IORING_MAX_ENTRIES) {
9596 if (!(p->flags & IORING_SETUP_CLAMP))
9597 return -EINVAL;
9598 entries = IORING_MAX_ENTRIES;
9599 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009600
9601 /*
9602 * Use twice as many entries for the CQ ring. It's possible for the
9603 * application to drive a higher depth than the size of the SQ ring,
9604 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009605 * some flexibility in overcommitting a bit. If the application has
9606 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9607 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009608 */
9609 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009610 if (p->flags & IORING_SETUP_CQSIZE) {
9611 /*
9612 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9613 * to a power-of-two, if it isn't already. We do NOT impose
9614 * any cq vs sq ring sizing.
9615 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009616 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009617 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009618 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9619 if (!(p->flags & IORING_SETUP_CLAMP))
9620 return -EINVAL;
9621 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9622 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009623 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9624 if (p->cq_entries < p->sq_entries)
9625 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009626 } else {
9627 p->cq_entries = 2 * p->sq_entries;
9628 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009629
Jens Axboe2b188cc2019-01-07 10:46:33 -07009630 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009631 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009632 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009633 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009634 if (!capable(CAP_IPC_LOCK))
9635 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009636
9637 /*
9638 * This is just grabbed for accounting purposes. When a process exits,
9639 * the mm is exited and dropped before the files, hence we need to hang
9640 * on to this mm purely for the purposes of being able to unaccount
9641 * memory (locked/pinned vm). It's not used for anything else.
9642 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009643 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009644 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009645
Jens Axboe2b188cc2019-01-07 10:46:33 -07009646 ret = io_allocate_scq_urings(ctx, p);
9647 if (ret)
9648 goto err;
9649
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009650 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009651 if (ret)
9652 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009653 /* always set a rsrc node */
Pavel Begunkov47b228c2021-04-29 11:46:48 +01009654 ret = io_rsrc_node_switch_start(ctx);
9655 if (ret)
9656 goto err;
Pavel Begunkoveae071c2021-04-25 14:32:24 +01009657 io_rsrc_node_switch(ctx, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009658
Jens Axboe2b188cc2019-01-07 10:46:33 -07009659 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009660 p->sq_off.head = offsetof(struct io_rings, sq.head);
9661 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9662 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9663 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9664 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9665 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9666 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009667
9668 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009669 p->cq_off.head = offsetof(struct io_rings, cq.head);
9670 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9671 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9672 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9673 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9674 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009675 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009676
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009677 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9678 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009679 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009680 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Pavel Begunkov96905572021-06-10 16:37:38 +01009681 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
9682 IORING_FEAT_RSRC_TAGS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009683
9684 if (copy_to_user(params, p, sizeof(*p))) {
9685 ret = -EFAULT;
9686 goto err;
9687 }
Jens Axboed1719f72020-07-30 13:43:53 -06009688
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009689 file = io_uring_get_file(ctx);
9690 if (IS_ERR(file)) {
9691 ret = PTR_ERR(file);
9692 goto err;
9693 }
9694
Jens Axboed1719f72020-07-30 13:43:53 -06009695 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009696 * Install ring fd as the very last thing, so we don't risk someone
9697 * having closed it before we finish setup
9698 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009699 ret = io_uring_install_fd(ctx, file);
9700 if (ret < 0) {
9701 /* fput will clean it up */
9702 fput(file);
9703 return ret;
9704 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009705
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009706 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009707 return ret;
9708err:
9709 io_ring_ctx_wait_and_kill(ctx);
9710 return ret;
9711}
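
/*
 * For illustration (sketch): from user space the whole sequence boils down
 * to io_uring_setup() followed by the mmap calls sketched earlier:
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 64, &p);
 *
 * after which p.sq_off/p.cq_off give the ring offsets and p.features can be
 * checked for IORING_FEAT_SINGLE_MMAP and friends.
 */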
9712
9713/*
9714 * Sets up an io_uring context and returns the fd. The application asks for a
9715 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9716 * params structure passed in.
9717 */
9718static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9719{
9720 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009721 int i;
9722
9723 if (copy_from_user(&p, params, sizeof(p)))
9724 return -EFAULT;
9725 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9726 if (p.resv[i])
9727 return -EINVAL;
9728 }
9729
Jens Axboe6c271ce2019-01-10 11:22:30 -07009730 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009731 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009732 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9733 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009734 return -EINVAL;
9735
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009736 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009737}
9738
9739SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9740 struct io_uring_params __user *, params)
9741{
9742 return io_uring_setup(entries, params);
9743}
9744
Jens Axboe66f4af92020-01-16 15:36:52 -07009745static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9746{
9747 struct io_uring_probe *p;
9748 size_t size;
9749 int i, ret;
9750
9751 size = struct_size(p, ops, nr_args);
9752 if (size == SIZE_MAX)
9753 return -EOVERFLOW;
9754 p = kzalloc(size, GFP_KERNEL);
9755 if (!p)
9756 return -ENOMEM;
9757
9758 ret = -EFAULT;
9759 if (copy_from_user(p, arg, size))
9760 goto out;
9761 ret = -EINVAL;
9762 if (memchr_inv(p, 0, size))
9763 goto out;
9764
9765 p->last_op = IORING_OP_LAST - 1;
9766 if (nr_args > IORING_OP_LAST)
9767 nr_args = IORING_OP_LAST;
9768
9769 for (i = 0; i < nr_args; i++) {
9770 p->ops[i].op = i;
9771 if (!io_op_defs[i].not_supported)
9772 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9773 }
9774 p->ops_len = i;
9775
9776 ret = 0;
9777 if (copy_to_user(arg, p, size))
9778 ret = -EFAULT;
9779out:
9780 kfree(p);
9781 return ret;
9782}
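
/*
 * For illustration (sketch): user space discovers supported opcodes by
 * registering a probe large enough for every opcode; the result mirrors the
 * struct filled in above:
 *
 *	struct io_uring_probe *p = calloc(1, sizeof(*p) +
 *			IORING_OP_LAST * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		p, IORING_OP_LAST);
 *
 * afterwards p->ops[i].flags & IO_URING_OP_SUPPORTED tells whether opcode i
 * is implemented by the running kernel.
 */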
9783
Jens Axboe071698e2020-01-28 10:04:42 -07009784static int io_register_personality(struct io_ring_ctx *ctx)
9785{
Jens Axboe4379bf82021-02-15 13:40:22 -07009786 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009787 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009788 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009789
Jens Axboe4379bf82021-02-15 13:40:22 -07009790 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009791
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009792 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9793 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9794 if (!ret)
9795 return id;
9796 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009797 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009798}
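
/*
 * The id returned above can later be placed in sqe->personality so that an
 * individual request runs with the registered credentials instead of those
 * of the submitting task.
 */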
9799
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009800static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9801 unsigned int nr_args)
9802{
9803 struct io_uring_restriction *res;
9804 size_t size;
9805 int i, ret;
9806
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009807 /* Restrictions allowed only if rings started disabled */
9808 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9809 return -EBADFD;
9810
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009811 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009812 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009813 return -EBUSY;
9814
9815 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9816 return -EINVAL;
9817
9818 size = array_size(nr_args, sizeof(*res));
9819 if (size == SIZE_MAX)
9820 return -EOVERFLOW;
9821
9822 res = memdup_user(arg, size);
9823 if (IS_ERR(res))
9824 return PTR_ERR(res);
9825
9826 ret = 0;
9827
9828 for (i = 0; i < nr_args; i++) {
9829 switch (res[i].opcode) {
9830 case IORING_RESTRICTION_REGISTER_OP:
9831 if (res[i].register_op >= IORING_REGISTER_LAST) {
9832 ret = -EINVAL;
9833 goto out;
9834 }
9835
9836 __set_bit(res[i].register_op,
9837 ctx->restrictions.register_op);
9838 break;
9839 case IORING_RESTRICTION_SQE_OP:
9840 if (res[i].sqe_op >= IORING_OP_LAST) {
9841 ret = -EINVAL;
9842 goto out;
9843 }
9844
9845 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9846 break;
9847 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9848 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9849 break;
9850 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9851 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9852 break;
9853 default:
9854 ret = -EINVAL;
9855 goto out;
9856 }
9857 }
9858
9859out:
9860 /* Reset all restrictions if an error happened */
9861 if (ret != 0)
9862 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9863 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009864 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009865
9866 kfree(res);
9867 return ret;
9868}
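
/*
 * For illustration (sketch): restrictions pair with IORING_SETUP_R_DISABLED;
 * the creator registers them and then enables the ring, e.g. to allow
 * nothing but readv/writev SQEs:
 *
 *	struct io_uring_restriction res[2] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITEV },
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_RESTRICTIONS,
 *		res, 2);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_ENABLE_RINGS,
 *		NULL, 0);
 */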
9869
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009870static int io_register_enable_rings(struct io_ring_ctx *ctx)
9871{
9872 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9873 return -EBADFD;
9874
9875 if (ctx->restrictions.registered)
9876 ctx->restricted = 1;
9877
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009878 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9879 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9880 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009881 return 0;
9882}
9883
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009884static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009885 struct io_uring_rsrc_update2 *up,
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009886 unsigned nr_args)
9887{
9888 __u32 tmp;
9889 int err;
9890
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009891 if (up->resv)
9892 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009893 if (check_add_overflow(up->offset, nr_args, &tmp))
9894 return -EOVERFLOW;
9895 err = io_rsrc_node_switch_start(ctx);
9896 if (err)
9897 return err;
9898
Pavel Begunkovfdecb662021-04-25 14:32:20 +01009899 switch (type) {
9900 case IORING_RSRC_FILE:
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009901 return __io_sqe_files_update(ctx, up, nr_args);
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009902 case IORING_RSRC_BUFFER:
9903 return __io_sqe_buffers_update(ctx, up, nr_args);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009904 }
9905 return -EINVAL;
9906}
9907
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009908static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
9909 unsigned nr_args)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009910{
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009911 struct io_uring_rsrc_update2 up;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009912
9913 if (!nr_args)
9914 return -EINVAL;
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009915 memset(&up, 0, sizeof(up));
9916 if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
9917 return -EFAULT;
9918 return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
9919}
9920
9921static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009922 unsigned size, unsigned type)
Pavel Begunkovc3bdad02021-04-25 14:32:22 +01009923{
9924 struct io_uring_rsrc_update2 up;
9925
9926 if (size != sizeof(up))
9927 return -EINVAL;
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009928 if (copy_from_user(&up, arg, sizeof(up)))
9929 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +01009930 if (!up.nr || up.resv)
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009931 return -EINVAL;
Pavel Begunkov992da012021-06-10 16:37:37 +01009932 return __io_register_rsrc_update(ctx, type, &up, up.nr);
Pavel Begunkov98f0b3b2021-04-25 14:32:19 +01009933}
9934
Pavel Begunkov792e3582021-04-25 14:32:21 +01009935static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
Pavel Begunkov992da012021-06-10 16:37:37 +01009936 unsigned int size, unsigned int type)
Pavel Begunkov792e3582021-04-25 14:32:21 +01009937{
9938 struct io_uring_rsrc_register rr;
9939
9940 /* keep it extendible */
9941 if (size != sizeof(rr))
9942 return -EINVAL;
9943
9944 memset(&rr, 0, sizeof(rr));
9945 if (copy_from_user(&rr, arg, size))
9946 return -EFAULT;
Pavel Begunkov992da012021-06-10 16:37:37 +01009947 if (!rr.nr || rr.resv || rr.resv2)
Pavel Begunkov792e3582021-04-25 14:32:21 +01009948 return -EINVAL;
9949
Pavel Begunkov992da012021-06-10 16:37:37 +01009950 switch (type) {
Pavel Begunkov792e3582021-04-25 14:32:21 +01009951 case IORING_RSRC_FILE:
9952 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
9953 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov634d00d2021-04-25 14:32:26 +01009954 case IORING_RSRC_BUFFER:
9955 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
9956 rr.nr, u64_to_user_ptr(rr.tags));
Pavel Begunkov792e3582021-04-25 14:32:21 +01009957 }
9958 return -EINVAL;
9959}
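
/*
 * For illustration (sketch): the *2 register opcodes take the extended
 * descriptor handled above, which also carries optional tags. Note that the
 * nr_args argument is the size of the struct here:
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= nr_files,
 *		.data	= (unsigned long) fds,
 *		.tags	= (unsigned long) tags,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 *
 * where fds is an int array and tags an optional __u64 array (or 0).
 */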
9960
Jens Axboefe764212021-06-17 10:19:54 -06009961static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg,
9962 unsigned len)
9963{
9964 struct io_uring_task *tctx = current->io_uring;
9965 cpumask_var_t new_mask;
9966 int ret;
9967
9968 if (!tctx || !tctx->io_wq)
9969 return -EINVAL;
9970
9971 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
9972 return -ENOMEM;
9973
9974 cpumask_clear(new_mask);
9975 if (len > cpumask_size())
9976 len = cpumask_size();
9977
9978 if (copy_from_user(new_mask, arg, len)) {
9979 free_cpumask_var(new_mask);
9980 return -EFAULT;
9981 }
9982
9983 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
9984 free_cpumask_var(new_mask);
9985 return ret;
9986}
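
/*
 * For illustration (sketch): an application can pin its io-wq workers to a
 * CPU set, e.g. CPUs 0-1:
 *
 *	cpu_set_t mask;
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */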
9987
9988static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
9989{
9990 struct io_uring_task *tctx = current->io_uring;
9991
9992 if (!tctx || !tctx->io_wq)
9993 return -EINVAL;
9994
9995 return io_wq_cpu_affinity(tctx->io_wq, NULL);
9996}
9997
Jens Axboe071698e2020-01-28 10:04:42 -07009998static bool io_register_op_must_quiesce(int op)
9999{
10000 switch (op) {
Bijan Mottahedehbd54b6f2021-04-25 14:32:25 +010010001 case IORING_REGISTER_BUFFERS:
10002 case IORING_UNREGISTER_BUFFERS:
Pavel Begunkovf4f7d212021-04-01 15:44:02 +010010003 case IORING_REGISTER_FILES:
Jens Axboe071698e2020-01-28 10:04:42 -070010004 case IORING_UNREGISTER_FILES:
10005 case IORING_REGISTER_FILES_UPDATE:
10006 case IORING_REGISTER_PROBE:
10007 case IORING_REGISTER_PERSONALITY:
10008 case IORING_UNREGISTER_PERSONALITY:
Pavel Begunkov992da012021-06-10 16:37:37 +010010009 case IORING_REGISTER_FILES2:
10010 case IORING_REGISTER_FILES_UPDATE2:
10011 case IORING_REGISTER_BUFFERS2:
10012 case IORING_REGISTER_BUFFERS_UPDATE:
Jens Axboefe764212021-06-17 10:19:54 -060010013 case IORING_REGISTER_IOWQ_AFF:
10014 case IORING_UNREGISTER_IOWQ_AFF:
Jens Axboe071698e2020-01-28 10:04:42 -070010015 return false;
10016 default:
10017 return true;
10018 }
10019}
10020
Jens Axboeedafcce2019-01-09 09:16:05 -070010021static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
10022 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -060010023 __releases(ctx->uring_lock)
10024 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -070010025{
10026 int ret;
10027
Jens Axboe35fa71a2019-04-22 10:23:23 -060010028 /*
10029 * We're inside the ring mutex; if the ref is already dying, then
10030 * someone else killed the ctx or is already going through
10031 * io_uring_register().
10032 */
10033 if (percpu_ref_is_dying(&ctx->refs))
10034 return -ENXIO;
10035
Pavel Begunkov75c40212021-04-15 13:07:40 +010010036 if (ctx->restricted) {
10037 if (opcode >= IORING_REGISTER_LAST)
10038 return -EINVAL;
10039 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
10040 if (!test_bit(opcode, ctx->restrictions.register_op))
10041 return -EACCES;
10042 }
10043
Jens Axboe071698e2020-01-28 10:04:42 -070010044 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010045 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -060010046
Jens Axboe05f3fb32019-12-09 11:22:50 -070010047 /*
10048 * Drop uring mutex before waiting for references to exit. If
10049 * another thread is currently inside io_uring_enter() it might
10050 * need to grab the uring_lock to make progress. If we hold it
10051 * here across the drain wait, then we can deadlock. It's safe
10052 * to drop the mutex here, since no new references will come in
10053 * after we've killed the percpu ref.
10054 */
10055 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010056 do {
10057 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10058 if (!ret)
10059 break;
Jens Axboeed6930c2020-10-08 19:09:46 -060010060 ret = io_run_task_work_sig();
10061 if (ret < 0)
10062 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010063 } while (1);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010064 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -060010065
Jens Axboec1503682020-01-08 08:26:07 -070010066 if (ret) {
Pavel Begunkovf70865d2021-04-11 01:46:40 +010010067 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
10068 return ret;
Jens Axboec1503682020-01-08 08:26:07 -070010069 }
Jens Axboe05f3fb32019-12-09 11:22:50 -070010070 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010071
10072 switch (opcode) {
10073 case IORING_REGISTER_BUFFERS:
Pavel Begunkov634d00d2021-04-25 14:32:26 +010010074 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -070010075 break;
10076 case IORING_UNREGISTER_BUFFERS:
10077 ret = -EINVAL;
10078 if (arg || nr_args)
10079 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -080010080 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -070010081 break;
Jens Axboe6b063142019-01-10 22:13:58 -070010082 case IORING_REGISTER_FILES:
Pavel Begunkov792e3582021-04-25 14:32:21 +010010083 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
Jens Axboe6b063142019-01-10 22:13:58 -070010084 break;
10085 case IORING_UNREGISTER_FILES:
10086 ret = -EINVAL;
10087 if (arg || nr_args)
10088 break;
10089 ret = io_sqe_files_unregister(ctx);
10090 break;
Jens Axboec3a31e62019-10-03 13:59:56 -060010091 case IORING_REGISTER_FILES_UPDATE:
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010092 ret = io_register_files_update(ctx, arg, nr_args);
Jens Axboec3a31e62019-10-03 13:59:56 -060010093 break;
Jens Axboe9b402842019-04-11 11:45:41 -060010094 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -070010095 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -060010096 ret = -EINVAL;
10097 if (nr_args != 1)
10098 break;
10099 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -070010100 if (ret)
10101 break;
10102 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
10103 ctx->eventfd_async = 1;
10104 else
10105 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -060010106 break;
10107 case IORING_UNREGISTER_EVENTFD:
10108 ret = -EINVAL;
10109 if (arg || nr_args)
10110 break;
10111 ret = io_eventfd_unregister(ctx);
10112 break;
Jens Axboe66f4af92020-01-16 15:36:52 -070010113 case IORING_REGISTER_PROBE:
10114 ret = -EINVAL;
10115 if (!arg || nr_args > 256)
10116 break;
10117 ret = io_probe(ctx, arg, nr_args);
10118 break;
Jens Axboe071698e2020-01-28 10:04:42 -070010119 case IORING_REGISTER_PERSONALITY:
10120 ret = -EINVAL;
10121 if (arg || nr_args)
10122 break;
10123 ret = io_register_personality(ctx);
10124 break;
10125 case IORING_UNREGISTER_PERSONALITY:
10126 ret = -EINVAL;
10127 if (arg)
10128 break;
10129 ret = io_unregister_personality(ctx, nr_args);
10130 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +020010131 case IORING_REGISTER_ENABLE_RINGS:
10132 ret = -EINVAL;
10133 if (arg || nr_args)
10134 break;
10135 ret = io_register_enable_rings(ctx);
10136 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +020010137 case IORING_REGISTER_RESTRICTIONS:
10138 ret = io_register_restrictions(ctx, arg, nr_args);
10139 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010140 case IORING_REGISTER_FILES2:
10141 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
Pavel Begunkov792e3582021-04-25 14:32:21 +010010142 break;
Pavel Begunkov992da012021-06-10 16:37:37 +010010143 case IORING_REGISTER_FILES_UPDATE2:
10144 ret = io_register_rsrc_update(ctx, arg, nr_args,
10145 IORING_RSRC_FILE);
10146 break;
10147 case IORING_REGISTER_BUFFERS2:
10148 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
10149 break;
10150 case IORING_REGISTER_BUFFERS_UPDATE:
10151 ret = io_register_rsrc_update(ctx, arg, nr_args,
10152 IORING_RSRC_BUFFER);
Pavel Begunkovc3bdad02021-04-25 14:32:22 +010010153 break;
Jens Axboefe764212021-06-17 10:19:54 -060010154 case IORING_REGISTER_IOWQ_AFF:
10155 ret = -EINVAL;
10156 if (!arg || !nr_args)
10157 break;
10158 ret = io_register_iowq_aff(ctx, arg, nr_args);
10159 break;
10160 case IORING_UNREGISTER_IOWQ_AFF:
10161 ret = -EINVAL;
10162 if (arg || nr_args)
10163 break;
10164 ret = io_unregister_iowq_aff(ctx);
10165 break;
Jens Axboeedafcce2019-01-09 09:16:05 -070010166 default:
10167 ret = -EINVAL;
10168 break;
10169 }
10170
Jens Axboe071698e2020-01-28 10:04:42 -070010171 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -070010172 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -070010173 percpu_ref_reinit(&ctx->refs);
Jens Axboe0f158b42020-05-14 17:18:39 -060010174 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -070010175 }
Jens Axboeedafcce2019-01-09 09:16:05 -070010176 return ret;
10177}
10178
10179SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
10180 void __user *, arg, unsigned int, nr_args)
10181{
10182 struct io_ring_ctx *ctx;
10183 long ret = -EBADF;
10184 struct fd f;
10185
10186 f = fdget(fd);
10187 if (!f.file)
10188 return -EBADF;
10189
10190 ret = -EOPNOTSUPP;
10191 if (f.file->f_op != &io_uring_fops)
10192 goto out_fput;
10193
10194 ctx = f.file->private_data;
10195
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +000010196 io_run_task_work();
10197
Jens Axboeedafcce2019-01-09 09:16:05 -070010198 mutex_lock(&ctx->uring_lock);
10199 ret = __io_uring_register(ctx, opcode, arg, nr_args);
10200 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020010201 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
10202 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -070010203out_fput:
10204 fdput(f);
10205 return ret;
10206}
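
/*
 * For illustration (sketch): a typical use of this syscall is wiring up an
 * eventfd for completion notification; nr_args must be 1, matching the check
 * in __io_uring_register():
 *
 *	int efd = eventfd(0, 0);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 */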
10207
Jens Axboe2b188cc2019-01-07 10:46:33 -070010208static int __init io_uring_init(void)
10209{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010210#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
10211 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
10212 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
10213} while (0)
10214
10215#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
10216 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
10217 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
10218 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
10219 BUILD_BUG_SQE_ELEM(1, __u8, flags);
10220 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
10221 BUILD_BUG_SQE_ELEM(4, __s32, fd);
10222 BUILD_BUG_SQE_ELEM(8, __u64, off);
10223 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
10224 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010225 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010226 BUILD_BUG_SQE_ELEM(24, __u32, len);
10227 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
10228 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
10229 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
10230 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +080010231 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
10232 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010233 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
10234 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
10235 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
10236 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
10237 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
10238 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
10239 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
10240 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010241 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010242 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
10243 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010244 BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010245 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +030010246 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +010010247
Pavel Begunkovb0d658ec2021-04-27 16:13:53 +010010248 BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
10249 sizeof(struct io_uring_rsrc_update));
10250 BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
10251 sizeof(struct io_uring_rsrc_update2));
10252 /* should fit into one byte */
10253 BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
10254
Jens Axboed3656342019-12-18 09:50:26 -070010255 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -070010256 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Pavel Begunkov16340ea2021-06-24 15:09:58 +010010257
Jens Axboe91f245d2021-02-09 13:48:50 -070010258 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
10259 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -070010260 return 0;
10261};
10262__initcall(io_uring_init);