blob: ab14692b05b41f161c0c7a84d700a9dd7f89af26 [file] [log] [blame]
Jens Axboe2b188cc2019-01-07 10:46:33 -07001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
Stefan Bühler1e84b972019-04-24 23:54:16 +02007 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
Jens Axboe2b188cc2019-01-07 10:46:33 -070029 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
Christoph Hellwigc992fe22019-01-11 09:43:02 -070040 * Copyright (c) 2018-2019 Christoph Hellwig
Jens Axboe2b188cc2019-01-07 10:46:33 -070041 */
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
Jens Axboe52de1fe2020-02-27 10:15:42 -070047#include <net/compat.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070048#include <linux/refcount.h>
49#include <linux/uio.h>
Pavel Begunkov6b47ee62020-01-18 20:22:41 +030050#include <linux/bits.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070051
52#include <linux/sched/signal.h>
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/fdtable.h>
56#include <linux/mm.h>
57#include <linux/mman.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070058#include <linux/percpu.h>
59#include <linux/slab.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070060#include <linux/blkdev.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070061#include <linux/bvec.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070062#include <linux/net.h>
63#include <net/sock.h>
64#include <net/af_unix.h>
Jens Axboe6b063142019-01-10 22:13:58 -070065#include <net/scm.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070066#include <linux/anon_inodes.h>
67#include <linux/sched/mm.h>
68#include <linux/uaccess.h>
69#include <linux/nospec.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070070#include <linux/sizes.h>
71#include <linux/hugetlb.h>
Jens Axboeaa4c3962019-11-29 10:14:00 -070072#include <linux/highmem.h>
Jens Axboe15b71ab2019-12-11 11:20:36 -070073#include <linux/namei.h>
74#include <linux/fsnotify.h>
Jens Axboe4840e412019-12-25 22:03:45 -070075#include <linux/fadvise.h>
Jens Axboe3e4827b2020-01-08 15:18:09 -070076#include <linux/eventpoll.h>
Pavel Begunkov7d67af22020-02-24 11:32:45 +030077#include <linux/splice.h>
Jens Axboeb41e9852020-02-17 09:52:41 -070078#include <linux/task_work.h>
Jens Axboebcf5a062020-05-22 09:24:42 -060079#include <linux/pagemap.h>
Jens Axboe0f212202020-09-13 13:09:39 -060080#include <linux/io_uring.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070081
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +020082#define CREATE_TRACE_POINTS
83#include <trace/events/io_uring.h>
84
Jens Axboe2b188cc2019-01-07 10:46:33 -070085#include <uapi/linux/io_uring.h>
86
87#include "internal.h"
Jens Axboe561fb042019-10-24 07:25:42 -060088#include "io-wq.h"
Jens Axboe2b188cc2019-01-07 10:46:33 -070089
Daniel Xu5277dea2019-09-14 14:23:45 -070090#define IORING_MAX_ENTRIES 32768
Jens Axboe33a107f2019-10-04 12:10:03 -060091#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
Jens Axboe65e19f52019-10-26 07:20:21 -060092
93/*
94 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
95 */
96#define IORING_FILE_TABLE_SHIFT 9
97#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
98#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
99#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
Stefano Garzarella21b55db2020-08-27 16:58:30 +0200100#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
101 IORING_REGISTER_LAST + IORING_OP_LAST)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700102
Pavel Begunkovb16fed662021-02-18 18:29:40 +0000103#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
104 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
105 IOSQE_BUFFER_SELECT)
106
Jens Axboe2b188cc2019-01-07 10:46:33 -0700107struct io_uring {
108 u32 head ____cacheline_aligned_in_smp;
109 u32 tail ____cacheline_aligned_in_smp;
110};
111
Stefan Bühler1e84b972019-04-24 23:54:16 +0200112/*
Hristo Venev75b28af2019-08-26 17:23:46 +0000113 * This data is shared with the application through the mmap at offsets
114 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
Stefan Bühler1e84b972019-04-24 23:54:16 +0200115 *
116 * The offsets to the member fields are published through struct
117 * io_sqring_offsets when calling io_uring_setup.
118 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000119struct io_rings {
Stefan Bühler1e84b972019-04-24 23:54:16 +0200120 /*
121 * Head and tail offsets into the ring; the offsets need to be
122 * masked to get valid indices.
123 *
Hristo Venev75b28af2019-08-26 17:23:46 +0000124 * The kernel controls head of the sq ring and the tail of the cq ring,
125 * and the application controls tail of the sq ring and the head of the
126 * cq ring.
Stefan Bühler1e84b972019-04-24 23:54:16 +0200127 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000128 struct io_uring sq, cq;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200129 /*
Hristo Venev75b28af2019-08-26 17:23:46 +0000130 * Bitmasks to apply to head and tail offsets (constant, equals
Stefan Bühler1e84b972019-04-24 23:54:16 +0200131 * ring_entries - 1)
132 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000133 u32 sq_ring_mask, cq_ring_mask;
134 /* Ring sizes (constant, power of 2) */
135 u32 sq_ring_entries, cq_ring_entries;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200136 /*
137 * Number of invalid entries dropped by the kernel due to
138 * invalid index stored in array
139 *
140 * Written by the kernel, shouldn't be modified by the
141 * application (i.e. get number of "new events" by comparing to
142 * cached value).
143 *
144 * After a new SQ head value was read by the application this
145 * counter includes all submissions that were dropped reaching
146 * the new SQ head (and possibly more).
147 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000148 u32 sq_dropped;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200149 /*
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +0200150 * Runtime SQ flags
Stefan Bühler1e84b972019-04-24 23:54:16 +0200151 *
152 * Written by the kernel, shouldn't be modified by the
153 * application.
154 *
155 * The application needs a full memory barrier before checking
156 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
157 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000158 u32 sq_flags;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200159 /*
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +0200160 * Runtime CQ flags
161 *
162 * Written by the application, shouldn't be modified by the
163 * kernel.
164 */
165 u32 cq_flags;
166 /*
Stefan Bühler1e84b972019-04-24 23:54:16 +0200167 * Number of completion events lost because the queue was full;
168 * this should be avoided by the application by making sure
LimingWu0b4295b2019-12-05 20:18:18 +0800169 * there are not more requests pending than there is space in
Stefan Bühler1e84b972019-04-24 23:54:16 +0200170 * the completion queue.
171 *
172 * Written by the kernel, shouldn't be modified by the
173 * application (i.e. get number of "new events" by comparing to
174 * cached value).
175 *
176 * As completion events come in out of order this counter is not
177 * ordered with any other data.
178 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000179 u32 cq_overflow;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200180 /*
181 * Ring buffer of completion events.
182 *
183 * The kernel writes completion events fresh every time they are
184 * produced, so the application is allowed to modify pending
185 * entries.
186 */
Hristo Venev75b28af2019-08-26 17:23:46 +0000187 struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700188};
189
Pavel Begunkov45d189c2021-02-10 00:03:07 +0000190enum io_uring_cmd_flags {
191 IO_URING_F_NONBLOCK = 1,
Pavel Begunkov889fca72021-02-10 00:03:09 +0000192 IO_URING_F_COMPLETE_DEFER = 2,
Pavel Begunkov45d189c2021-02-10 00:03:07 +0000193};
194
Jens Axboeedafcce2019-01-09 09:16:05 -0700195struct io_mapped_ubuf {
196 u64 ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +0100197 u64 ubuf_end;
Jens Axboeedafcce2019-01-09 09:16:05 -0700198 struct bio_vec *bvec;
199 unsigned int nr_bvecs;
Jens Axboede293932020-09-17 16:19:16 -0600200 unsigned long acct_pages;
Jens Axboeedafcce2019-01-09 09:16:05 -0700201};
202
Bijan Mottahedeh50238532021-01-15 17:37:45 +0000203struct io_ring_ctx;
204
Pavel Begunkov6c2450a2021-02-23 12:40:22 +0000205struct io_overflow_cqe {
206 struct io_uring_cqe cqe;
207 struct list_head list;
208};
209
Pavel Begunkova04b0ac2021-04-01 15:44:04 +0100210struct io_fixed_file {
211 /* file * with additional FFS_* flags */
212 unsigned long file_ptr;
213};
214
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000215struct io_rsrc_put {
216 struct list_head list;
Bijan Mottahedeh50238532021-01-15 17:37:45 +0000217 union {
218 void *rsrc;
219 struct file *file;
220 };
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000221};
222
Pavel Begunkovaeca2412021-04-11 01:46:37 +0100223struct io_file_table {
224 /* two level table */
225 struct io_fixed_file **files;
Jens Axboe31b51512019-01-18 22:56:34 -0700226};
227
Pavel Begunkovb895c9a2021-04-01 15:43:40 +0100228struct io_rsrc_node {
Xiaoguang Wang05589552020-03-31 14:05:18 +0800229 struct percpu_ref refs;
230 struct list_head node;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000231 struct list_head rsrc_list;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +0100232 struct io_rsrc_data *rsrc_data;
Jens Axboe4a38aed22020-05-14 17:21:15 -0600233 struct llist_node llist;
Pavel Begunkove2978222020-11-18 14:56:26 +0000234 bool done;
Xiaoguang Wang05589552020-03-31 14:05:18 +0800235};
236
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +0100237typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
238
Pavel Begunkovb895c9a2021-04-01 15:43:40 +0100239struct io_rsrc_data {
Jens Axboe05f3fb32019-12-09 11:22:50 -0700240 struct io_ring_ctx *ctx;
241
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +0100242 rsrc_put_fn *do_put;
Pavel Begunkov3e942492021-04-11 01:46:34 +0100243 atomic_t refs;
Jens Axboe05f3fb32019-12-09 11:22:50 -0700244 struct completion done;
Hao Xu8bad28d2021-02-19 17:19:36 +0800245 bool quiesce;
Jens Axboe05f3fb32019-12-09 11:22:50 -0700246};
247
Jens Axboe5a2e7452020-02-23 16:23:11 -0700248struct io_buffer {
249 struct list_head list;
250 __u64 addr;
251 __s32 len;
252 __u16 bid;
253};
254
Stefano Garzarella21b55db2020-08-27 16:58:30 +0200255struct io_restriction {
256 DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
257 DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
258 u8 sqe_flags_allowed;
259 u8 sqe_flags_required;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +0200260 bool registered;
Stefano Garzarella21b55db2020-08-27 16:58:30 +0200261};
262
Jens Axboe37d1e2e2021-02-17 21:03:43 -0700263enum {
264 IO_SQ_THREAD_SHOULD_STOP = 0,
265 IO_SQ_THREAD_SHOULD_PARK,
266};
267
Jens Axboe534ca6d2020-09-02 13:52:19 -0600268struct io_sq_data {
269 refcount_t refs;
Pavel Begunkov9e138a42021-03-14 20:57:12 +0000270 atomic_t park_pending;
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +0000271 struct mutex lock;
Jens Axboe69fb2132020-09-14 11:16:23 -0600272
273 /* ctx's that are using this sqd */
274 struct list_head ctx_list;
Jens Axboe69fb2132020-09-14 11:16:23 -0600275
Jens Axboe534ca6d2020-09-02 13:52:19 -0600276 struct task_struct *thread;
277 struct wait_queue_head wait;
Xiaoguang Wang08369242020-11-03 14:15:59 +0800278
279 unsigned sq_thread_idle;
Jens Axboe37d1e2e2021-02-17 21:03:43 -0700280 int sq_cpu;
281 pid_t task_pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -0700282 pid_t task_tgid;
Jens Axboe37d1e2e2021-02-17 21:03:43 -0700283
284 unsigned long state;
Jens Axboe37d1e2e2021-02-17 21:03:43 -0700285 struct completion exited;
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +0000286 struct callback_head *park_task_work;
Jens Axboe534ca6d2020-09-02 13:52:19 -0600287};
288
Pavel Begunkov258b29a2021-02-10 00:03:10 +0000289#define IO_IOPOLL_BATCH 8
Pavel Begunkov6dd0be12021-02-10 00:03:13 +0000290#define IO_COMPL_BATCH 32
Pavel Begunkov6ff119a2021-02-10 00:03:18 +0000291#define IO_REQ_CACHE_SIZE 32
Pavel Begunkovbf019da2021-02-10 00:03:17 +0000292#define IO_REQ_ALLOC_BATCH 8
Pavel Begunkov258b29a2021-02-10 00:03:10 +0000293
294struct io_comp_state {
Pavel Begunkov6dd0be12021-02-10 00:03:13 +0000295 struct io_kiocb *reqs[IO_COMPL_BATCH];
Jens Axboe1b4c3512021-02-10 00:03:19 +0000296 unsigned int nr;
Jens Axboec7dae4b2021-02-09 19:53:37 -0700297 unsigned int locked_free_nr;
298 /* inline/task_work completion list, under ->uring_lock */
Jens Axboe1b4c3512021-02-10 00:03:19 +0000299 struct list_head free_list;
Jens Axboec7dae4b2021-02-09 19:53:37 -0700300 /* IRQ completion list, under ->completion_lock */
301 struct list_head locked_free_list;
Pavel Begunkov258b29a2021-02-10 00:03:10 +0000302};
303
Pavel Begunkova1ab7b32021-02-18 18:29:42 +0000304struct io_submit_link {
305 struct io_kiocb *head;
306 struct io_kiocb *last;
307};
308
Pavel Begunkov258b29a2021-02-10 00:03:10 +0000309struct io_submit_state {
310 struct blk_plug plug;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +0000311 struct io_submit_link link;
Pavel Begunkov258b29a2021-02-10 00:03:10 +0000312
313 /*
314 * io_kiocb alloc cache
315 */
Pavel Begunkovbf019da2021-02-10 00:03:17 +0000316 void *reqs[IO_REQ_CACHE_SIZE];
Pavel Begunkov258b29a2021-02-10 00:03:10 +0000317 unsigned int free_reqs;
318
319 bool plug_started;
320
321 /*
322 * Batch completion logic
323 */
324 struct io_comp_state comp;
325
326 /*
327 * File reference cache
328 */
329 struct file *file;
330 unsigned int fd;
331 unsigned int file_refs;
332 unsigned int ios_left;
333};
334
Jens Axboe2b188cc2019-01-07 10:46:33 -0700335struct io_ring_ctx {
336 struct {
337 struct percpu_ref refs;
338 } ____cacheline_aligned_in_smp;
339
340 struct {
341 unsigned int flags;
Randy Dunlape1d85332020-02-05 20:57:10 -0800342 unsigned int compat: 1;
Randy Dunlape1d85332020-02-05 20:57:10 -0800343 unsigned int drain_next: 1;
344 unsigned int eventfd_async: 1;
Stefano Garzarella21b55db2020-08-27 16:58:30 +0200345 unsigned int restricted: 1;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700346
Hristo Venev75b28af2019-08-26 17:23:46 +0000347 /*
348 * Ring buffer of indices into array of io_uring_sqe, which is
349 * mmapped by the application using the IORING_OFF_SQES offset.
350 *
351 * This indirection could e.g. be used to assign fixed
352 * io_uring_sqe entries to operations and only submit them to
353 * the queue when needed.
354 *
355 * The kernel modifies neither the indices array nor the entries
356 * array.
357 */
358 u32 *sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700359 unsigned cached_sq_head;
360 unsigned sq_entries;
361 unsigned sq_mask;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700362 unsigned sq_thread_idle;
Jens Axboe498ccd92019-10-25 10:04:25 -0600363 unsigned cached_sq_dropped;
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +0100364 unsigned cached_cq_overflow;
Jens Axboead3eb2c2019-12-18 17:12:20 -0700365 unsigned long sq_check_overflow;
Jens Axboede0617e2019-04-06 21:51:27 -0600366
Jens Axboee9418942021-02-19 12:33:30 -0700367 /* hashed buffered write serialization */
368 struct io_wq_hash *hash_map;
369
Jens Axboede0617e2019-04-06 21:51:27 -0600370 struct list_head defer_list;
Jens Axboe5262f562019-09-17 12:26:57 -0600371 struct list_head timeout_list;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -0700372 struct list_head cq_overflow_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700373
Jens Axboead3eb2c2019-12-18 17:12:20 -0700374 struct io_uring_sqe *sq_sqes;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700375 } ____cacheline_aligned_in_smp;
376
Jens Axboe3c1a2ea2021-02-11 10:48:03 -0700377 struct {
378 struct mutex uring_lock;
379 wait_queue_head_t wait;
380 } ____cacheline_aligned_in_smp;
381
382 struct io_submit_state submit_state;
383
Hristo Venev75b28af2019-08-26 17:23:46 +0000384 struct io_rings *rings;
385
Jens Axboe2aede0e2020-09-14 10:45:53 -0600386 /* Only used for accounting purposes */
387 struct mm_struct *mm_account;
388
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +0100389 const struct cred *sq_creds; /* cred used for __io_sq_thread() */
Jens Axboe534ca6d2020-09-02 13:52:19 -0600390 struct io_sq_data *sq_data; /* if using sq thread polling */
391
Jens Axboe90554202020-09-03 12:12:41 -0600392 struct wait_queue_head sqo_sq_wait;
Jens Axboe69fb2132020-09-14 11:16:23 -0600393 struct list_head sqd_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700394
Jens Axboe6b063142019-01-10 22:13:58 -0700395 /*
396 * If used, fixed file set. Writers must ensure that ->refs is dead,
397 * readers must ensure that ->refs is alive as long as the file* is
398 * used. Only updated through io_uring_register(2).
399 */
Pavel Begunkovb895c9a2021-04-01 15:43:40 +0100400 struct io_rsrc_data *file_data;
Pavel Begunkovaeca2412021-04-11 01:46:37 +0100401 struct io_file_table file_table;
Jens Axboe6b063142019-01-10 22:13:58 -0700402 unsigned nr_user_files;
403
Jens Axboeedafcce2019-01-09 09:16:05 -0700404 /* if used, fixed mapped user buffers */
405 unsigned nr_user_bufs;
406 struct io_mapped_ubuf *user_bufs;
407
Jens Axboe2b188cc2019-01-07 10:46:33 -0700408 struct user_struct *user;
409
Jens Axboe0f158b42020-05-14 17:18:39 -0600410 struct completion ref_comp;
Jens Axboe206aefd2019-11-07 18:27:42 -0700411
412#if defined(CONFIG_UNIX)
413 struct socket *ring_sock;
414#endif
415
Jens Axboe9e15c3a2021-03-13 12:29:43 -0700416 struct xarray io_buffers;
Jens Axboe5a2e7452020-02-23 16:23:11 -0700417
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +0000418 struct xarray personalities;
419 u32 pers_next;
Jens Axboe071698e2020-01-28 10:04:42 -0700420
Jens Axboe206aefd2019-11-07 18:27:42 -0700421 struct {
422 unsigned cached_cq_tail;
423 unsigned cq_entries;
424 unsigned cq_mask;
425 atomic_t cq_timeouts;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -0500426 unsigned cq_last_tm_flush;
Jens Axboead3eb2c2019-12-18 17:12:20 -0700427 unsigned long cq_check_overflow;
Jens Axboe206aefd2019-11-07 18:27:42 -0700428 struct wait_queue_head cq_wait;
429 struct fasync_struct *cq_fasync;
430 struct eventfd_ctx *cq_ev_fd;
431 } ____cacheline_aligned_in_smp;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700432
433 struct {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700434 spinlock_t completion_lock;
Jens Axboee94f1412019-12-19 12:06:02 -0700435
Jens Axboedef596e2019-01-09 08:59:42 -0700436 /*
Pavel Begunkov540e32a2020-07-13 23:37:09 +0300437 * ->iopoll_list is protected by the ctx->uring_lock for
Jens Axboedef596e2019-01-09 08:59:42 -0700438 * io_uring instances that don't use IORING_SETUP_SQPOLL.
439 * For SQPOLL, only the single threaded io_sq_thread() will
440 * manipulate the list, hence no extra locking is needed there.
441 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +0300442 struct list_head iopoll_list;
Jens Axboe78076bb2019-12-04 19:56:40 -0700443 struct hlist_head *cancel_hash;
444 unsigned cancel_hash_bits;
Jens Axboee94f1412019-12-19 12:06:02 -0700445 bool poll_multi_file;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700446 } ____cacheline_aligned_in_smp;
Jens Axboe85faa7b2020-04-09 18:14:00 -0600447
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000448 struct delayed_work rsrc_put_work;
449 struct llist_head rsrc_put_llist;
Bijan Mottahedehd67d2262021-01-15 17:37:46 +0000450 struct list_head rsrc_ref_list;
451 spinlock_t rsrc_ref_lock;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +0100452 struct io_rsrc_node *rsrc_node;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +0100453 struct io_rsrc_node *rsrc_backup_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -0600454
Stefano Garzarella21b55db2020-08-27 16:58:30 +0200455 struct io_restriction restrictions;
Jens Axboe3c1a2ea2021-02-11 10:48:03 -0700456
Jens Axboe7c25c0d2021-02-16 07:17:00 -0700457 /* exit task_work */
458 struct callback_head *exit_task_work;
459
Jens Axboe3c1a2ea2021-02-11 10:48:03 -0700460 /* Keep this last, we don't need it for the fast path */
461 struct work_struct exit_work;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +0000462 struct list_head tctx_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700463};
464
Stefan Metzmacher53e043b2021-03-15 12:56:56 +0100465struct io_uring_task {
466 /* submission side */
467 struct xarray xa;
468 struct wait_queue_head wait;
Stefan Metzmacheree53fb22021-03-15 12:56:57 +0100469 const struct io_ring_ctx *last;
470 struct io_wq *io_wq;
Stefan Metzmacher53e043b2021-03-15 12:56:56 +0100471 struct percpu_counter inflight;
Pavel Begunkovb303fe22021-04-11 01:46:26 +0100472 atomic_t inflight_tracked;
Stefan Metzmacher53e043b2021-03-15 12:56:56 +0100473 atomic_t in_idle;
Stefan Metzmacher53e043b2021-03-15 12:56:56 +0100474
475 spinlock_t task_lock;
476 struct io_wq_work_list task_list;
477 unsigned long task_state;
478 struct callback_head task_work;
479};
480
Jens Axboe09bb8392019-03-13 12:39:28 -0600481/*
482 * First field must be the file pointer in all the
483 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
484 */
Jens Axboe221c5eb2019-01-17 09:41:58 -0700485struct io_poll_iocb {
486 struct file *file;
Pavel Begunkov018043b2020-10-27 23:17:18 +0000487 struct wait_queue_head *head;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700488 __poll_t events;
Jens Axboe8c838782019-03-12 15:48:16 -0600489 bool done;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700490 bool canceled;
Pavel Begunkov9d805892021-04-13 02:58:40 +0100491 struct wait_queue_entry wait;
492};
493
494struct io_poll_update {
495 struct file *file;
496 u64 old_user_data;
497 u64 new_user_data;
498 __poll_t events;
Jens Axboeb69de282021-03-17 08:37:41 -0600499 bool update_events;
500 bool update_user_data;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700501};
502
Jens Axboeb5dba592019-12-11 14:02:38 -0700503struct io_close {
504 struct file *file;
Jens Axboeb5dba592019-12-11 14:02:38 -0700505 int fd;
506};
507
Jens Axboead8a48a2019-11-15 08:49:11 -0700508struct io_timeout_data {
509 struct io_kiocb *req;
510 struct hrtimer timer;
511 struct timespec64 ts;
512 enum hrtimer_mode mode;
513};
514
Jens Axboe8ed8d3c2019-12-16 11:55:28 -0700515struct io_accept {
516 struct file *file;
517 struct sockaddr __user *addr;
518 int __user *addr_len;
519 int flags;
Jens Axboe09952e32020-03-19 20:16:56 -0600520 unsigned long nofile;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -0700521};
522
523struct io_sync {
524 struct file *file;
525 loff_t len;
526 loff_t off;
527 int flags;
Jens Axboed63d1b52019-12-10 10:38:56 -0700528 int mode;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -0700529};
530
Jens Axboefbf23842019-12-17 18:45:56 -0700531struct io_cancel {
532 struct file *file;
533 u64 addr;
534};
535
Jens Axboeb29472e2019-12-17 18:50:29 -0700536struct io_timeout {
537 struct file *file;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +0300538 u32 off;
539 u32 target_seq;
Pavel Begunkov135fcde2020-07-13 23:37:12 +0300540 struct list_head list;
Pavel Begunkov90cd7e42020-10-27 23:25:36 +0000541 /* head of the link, used by linked timeouts only */
542 struct io_kiocb *head;
Jens Axboeb29472e2019-12-17 18:50:29 -0700543};
544
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +0100545struct io_timeout_rem {
546 struct file *file;
547 u64 addr;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +0000548
549 /* timeout update */
550 struct timespec64 ts;
551 u32 flags;
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +0100552};
553
Jens Axboe9adbd452019-12-20 08:45:55 -0700554struct io_rw {
555 /* NOTE: kiocb has the file as the first member, so don't do it here */
556 struct kiocb kiocb;
557 u64 addr;
558 u64 len;
559};
560
Jens Axboe3fbb51c2019-12-20 08:51:52 -0700561struct io_connect {
562 struct file *file;
563 struct sockaddr __user *addr;
564 int addr_len;
565};
566
Jens Axboee47293f2019-12-20 08:58:21 -0700567struct io_sr_msg {
568 struct file *file;
Jens Axboefddafac2020-01-04 20:19:44 -0700569 union {
Pavel Begunkov4af34172021-04-11 01:46:30 +0100570 struct compat_msghdr __user *umsg_compat;
571 struct user_msghdr __user *umsg;
572 void __user *buf;
Jens Axboefddafac2020-01-04 20:19:44 -0700573 };
Jens Axboee47293f2019-12-20 08:58:21 -0700574 int msg_flags;
Jens Axboebcda7ba2020-02-23 16:42:51 -0700575 int bgid;
Jens Axboefddafac2020-01-04 20:19:44 -0700576 size_t len;
Jens Axboebcda7ba2020-02-23 16:42:51 -0700577 struct io_buffer *kbuf;
Jens Axboee47293f2019-12-20 08:58:21 -0700578};
579
Jens Axboe15b71ab2019-12-11 11:20:36 -0700580struct io_open {
581 struct file *file;
582 int dfd;
Jens Axboe15b71ab2019-12-11 11:20:36 -0700583 struct filename *filename;
Jens Axboec12cedf2020-01-08 17:41:21 -0700584 struct open_how how;
Jens Axboe4022e7a2020-03-19 19:23:18 -0600585 unsigned long nofile;
Jens Axboe15b71ab2019-12-11 11:20:36 -0700586};
587
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000588struct io_rsrc_update {
Jens Axboe05f3fb32019-12-09 11:22:50 -0700589 struct file *file;
590 u64 arg;
591 u32 nr_args;
592 u32 offset;
593};
594
Jens Axboe4840e412019-12-25 22:03:45 -0700595struct io_fadvise {
596 struct file *file;
597 u64 offset;
598 u32 len;
599 u32 advice;
600};
601
Jens Axboec1ca7572019-12-25 22:18:28 -0700602struct io_madvise {
603 struct file *file;
604 u64 addr;
605 u32 len;
606 u32 advice;
607};
608
Jens Axboe3e4827b2020-01-08 15:18:09 -0700609struct io_epoll {
610 struct file *file;
611 int epfd;
612 int op;
613 int fd;
614 struct epoll_event event;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700615};
616
Pavel Begunkov7d67af22020-02-24 11:32:45 +0300617struct io_splice {
618 struct file *file_out;
619 struct file *file_in;
620 loff_t off_out;
621 loff_t off_in;
622 u64 len;
623 unsigned int flags;
624};
625
Jens Axboeddf0322d2020-02-23 16:41:33 -0700626struct io_provide_buf {
627 struct file *file;
628 __u64 addr;
629 __s32 len;
630 __u32 bgid;
631 __u16 nbufs;
632 __u16 bid;
633};
634
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -0700635struct io_statx {
636 struct file *file;
637 int dfd;
638 unsigned int mask;
639 unsigned int flags;
Bijan Mottahedehe62753e2020-05-22 21:31:18 -0700640 const char __user *filename;
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -0700641 struct statx __user *buffer;
642};
643
Jens Axboe36f4fa62020-09-05 11:14:22 -0600644struct io_shutdown {
645 struct file *file;
646 int how;
647};
648
Jens Axboe80a261f2020-09-28 14:23:58 -0600649struct io_rename {
650 struct file *file;
651 int old_dfd;
652 int new_dfd;
653 struct filename *oldpath;
654 struct filename *newpath;
655 int flags;
656};
657
Jens Axboe14a11432020-09-28 14:27:37 -0600658struct io_unlink {
659 struct file *file;
660 int dfd;
661 int flags;
662 struct filename *filename;
663};
664
Pavel Begunkov3ca405e2020-07-13 23:37:08 +0300665struct io_completion {
666 struct file *file;
667 struct list_head list;
Pavel Begunkov8c3f9cd2021-02-28 22:35:15 +0000668 u32 cflags;
Pavel Begunkov3ca405e2020-07-13 23:37:08 +0300669};
670
Jens Axboef499a022019-12-02 16:28:46 -0700671struct io_async_connect {
672 struct sockaddr_storage address;
673};
674
Jens Axboe03b12302019-12-02 18:50:25 -0700675struct io_async_msghdr {
676 struct iovec fast_iov[UIO_FASTIOV];
Pavel Begunkov257e84a2021-02-05 00:58:00 +0000677 /* points to an allocated iov, if NULL we use fast_iov instead */
678 struct iovec *free_iov;
Jens Axboe03b12302019-12-02 18:50:25 -0700679 struct sockaddr __user *uaddr;
680 struct msghdr msg;
Jens Axboeb5379162020-02-09 11:29:15 -0700681 struct sockaddr_storage addr;
Jens Axboe03b12302019-12-02 18:50:25 -0700682};
683
Jens Axboef67676d2019-12-02 11:03:47 -0700684struct io_async_rw {
685 struct iovec fast_iov[UIO_FASTIOV];
Jens Axboeff6165b2020-08-13 09:47:43 -0600686 const struct iovec *free_iovec;
687 struct iov_iter iter;
Jens Axboe227c0c92020-08-13 11:51:40 -0600688 size_t bytes_done;
Jens Axboebcf5a062020-05-22 09:24:42 -0600689 struct wait_page_queue wpq;
Jens Axboef67676d2019-12-02 11:03:47 -0700690};
691
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300692enum {
693 REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
694 REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
695 REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
696 REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
697 REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
Jens Axboebcda7ba2020-02-23 16:42:51 -0700698 REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300699
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300700 REQ_F_FAIL_LINK_BIT,
701 REQ_F_INFLIGHT_BIT,
702 REQ_F_CUR_POS_BIT,
703 REQ_F_NOWAIT_BIT,
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300704 REQ_F_LINK_TIMEOUT_BIT,
Pavel Begunkov99bc4c32020-02-07 22:04:45 +0300705 REQ_F_NEED_CLEANUP_BIT,
Jens Axboed7718a92020-02-14 22:23:12 -0700706 REQ_F_POLLED_BIT,
Jens Axboebcda7ba2020-02-23 16:42:51 -0700707 REQ_F_BUFFER_SELECTED_BIT,
Pavel Begunkov900fad42020-10-19 16:39:16 +0100708 REQ_F_LTIMEOUT_ACTIVE_BIT,
Pavel Begunkove342c802021-01-19 13:32:47 +0000709 REQ_F_COMPLETE_INLINE_BIT,
Jens Axboe230d50d2021-04-01 20:41:15 -0600710 REQ_F_REISSUE_BIT,
Pavel Begunkov8c130822021-03-22 01:58:32 +0000711 REQ_F_DONT_REISSUE_BIT,
Jens Axboe7b29f922021-03-12 08:30:14 -0700712 /* keep async read/write and isreg together and in order */
713 REQ_F_ASYNC_READ_BIT,
714 REQ_F_ASYNC_WRITE_BIT,
715 REQ_F_ISREG_BIT,
Jens Axboe84557872020-03-03 15:28:17 -0700716
717 /* not a real bit, just to check we're not overflowing the space */
718 __REQ_F_LAST_BIT,
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300719};
720
721enum {
722 /* ctx owns file */
723 REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
724 /* drain existing IO first */
725 REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
726 /* linked sqes */
727 REQ_F_LINK = BIT(REQ_F_LINK_BIT),
728 /* doesn't sever on completion < 0 */
729 REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
730 /* IOSQE_ASYNC */
731 REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
Jens Axboebcda7ba2020-02-23 16:42:51 -0700732 /* IOSQE_BUFFER_SELECT */
733 REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300734
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300735 /* fail rest of links */
736 REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +0000737 /* on inflight list, should be cancelled and waited on exit reliably */
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300738 REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
739 /* read/write uses file position */
740 REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
741 /* must not punt to workers */
742 REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
Pavel Begunkov900fad42020-10-19 16:39:16 +0100743 /* has or had linked timeout */
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300744 REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
Pavel Begunkov99bc4c32020-02-07 22:04:45 +0300745 /* needs cleanup */
746 REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
Jens Axboed7718a92020-02-14 22:23:12 -0700747 /* already went through poll handler */
748 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
Jens Axboebcda7ba2020-02-23 16:42:51 -0700749 /* buffer already selected */
750 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
Pavel Begunkov900fad42020-10-19 16:39:16 +0100751 /* linked timeout is active, i.e. prepared by link's head */
752 REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
Pavel Begunkove342c802021-01-19 13:32:47 +0000753 /* completion is deferred through io_comp_state */
754 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
Jens Axboe230d50d2021-04-01 20:41:15 -0600755 /* caller should reissue async */
756 REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
Pavel Begunkov8c130822021-03-22 01:58:32 +0000757 /* don't attempt request reissue, see io_rw_reissue() */
758 REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT),
Jens Axboe7b29f922021-03-12 08:30:14 -0700759 /* supports async reads */
760 REQ_F_ASYNC_READ = BIT(REQ_F_ASYNC_READ_BIT),
761 /* supports async writes */
762 REQ_F_ASYNC_WRITE = BIT(REQ_F_ASYNC_WRITE_BIT),
763 /* regular file */
764 REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
Jens Axboed7718a92020-02-14 22:23:12 -0700765};
766
767struct async_poll {
768 struct io_poll_iocb poll;
Jens Axboe807abcb2020-07-17 17:09:27 -0600769 struct io_poll_iocb *double_poll;
Pavel Begunkov6b47ee62020-01-18 20:22:41 +0300770};
771
Jens Axboe7cbf1722021-02-10 00:03:20 +0000772struct io_task_work {
773 struct io_wq_work_node node;
774 task_work_func_t func;
775};
776
Jens Axboe09bb8392019-03-13 12:39:28 -0600777/*
778 * NOTE! Each of the iocb union members has the file pointer
779 * as the first entry in their struct definition. So you can
780 * access the file pointer through any of the sub-structs,
781 * or directly as just 'ki_filp' in this struct.
782 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700783struct io_kiocb {
Jens Axboe221c5eb2019-01-17 09:41:58 -0700784 union {
Jens Axboe09bb8392019-03-13 12:39:28 -0600785 struct file *file;
Jens Axboe9adbd452019-12-20 08:45:55 -0700786 struct io_rw rw;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700787 struct io_poll_iocb poll;
Pavel Begunkov9d805892021-04-13 02:58:40 +0100788 struct io_poll_update poll_update;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -0700789 struct io_accept accept;
790 struct io_sync sync;
Jens Axboefbf23842019-12-17 18:45:56 -0700791 struct io_cancel cancel;
Jens Axboeb29472e2019-12-17 18:50:29 -0700792 struct io_timeout timeout;
Pavel Begunkov0bdf7a22020-10-10 18:34:10 +0100793 struct io_timeout_rem timeout_rem;
Jens Axboe3fbb51c2019-12-20 08:51:52 -0700794 struct io_connect connect;
Jens Axboee47293f2019-12-20 08:58:21 -0700795 struct io_sr_msg sr_msg;
Jens Axboe15b71ab2019-12-11 11:20:36 -0700796 struct io_open open;
Jens Axboeb5dba592019-12-11 14:02:38 -0700797 struct io_close close;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000798 struct io_rsrc_update rsrc_update;
Jens Axboe4840e412019-12-25 22:03:45 -0700799 struct io_fadvise fadvise;
Jens Axboec1ca7572019-12-25 22:18:28 -0700800 struct io_madvise madvise;
Jens Axboe3e4827b2020-01-08 15:18:09 -0700801 struct io_epoll epoll;
Pavel Begunkov7d67af22020-02-24 11:32:45 +0300802 struct io_splice splice;
Jens Axboeddf0322d2020-02-23 16:41:33 -0700803 struct io_provide_buf pbuf;
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -0700804 struct io_statx statx;
Jens Axboe36f4fa62020-09-05 11:14:22 -0600805 struct io_shutdown shutdown;
Jens Axboe80a261f2020-09-28 14:23:58 -0600806 struct io_rename rename;
Jens Axboe14a11432020-09-28 14:27:37 -0600807 struct io_unlink unlink;
Pavel Begunkov3ca405e2020-07-13 23:37:08 +0300808 /* use only after cleaning per-op data, see io_clean_op() */
809 struct io_completion compl;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700810 };
Jens Axboe2b188cc2019-01-07 10:46:33 -0700811
Jens Axboee8c2bc12020-08-15 18:44:09 -0700812 /* opcode allocated if it needs to store data for async defer */
813 void *async_data;
Jens Axboed625c6e2019-12-17 19:53:05 -0700814 u8 opcode;
Xiaoguang Wang65a65432020-06-11 23:39:36 +0800815 /* polled IO has completed */
816 u8 iopoll_completed;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700817
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -0700818 u16 buf_index;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +0300819 u32 result;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -0700820
Pavel Begunkov010e8e62020-07-30 18:43:45 +0300821 struct io_ring_ctx *ctx;
822 unsigned int flags;
Jens Axboeabc54d62021-02-24 13:32:30 -0700823 atomic_t refs;
Pavel Begunkov010e8e62020-07-30 18:43:45 +0300824 struct task_struct *task;
825 u64 user_data;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700826
Pavel Begunkovf2f87372020-10-27 23:25:37 +0000827 struct io_kiocb *link;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +0000828 struct percpu_ref *fixed_rsrc_refs;
Jens Axboed7718a92020-02-14 22:23:12 -0700829
Pavel Begunkovb303fe22021-04-11 01:46:26 +0100830 /* used with ctx->iopoll_list with reads/writes */
Pavel Begunkov010e8e62020-07-30 18:43:45 +0300831 struct list_head inflight_entry;
Jens Axboe7cbf1722021-02-10 00:03:20 +0000832 union {
833 struct io_task_work io_task_work;
834 struct callback_head task_work;
835 };
Pavel Begunkov010e8e62020-07-30 18:43:45 +0300836 /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
837 struct hlist_node hash_node;
838 struct async_poll *apoll;
839 struct io_wq_work work;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700840};
841
Pavel Begunkov13bf43f2021-03-06 11:02:12 +0000842struct io_tctx_node {
843 struct list_head ctx_node;
844 struct task_struct *task;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +0000845 struct io_ring_ctx *ctx;
846};
847
Pavel Begunkov27dc8332020-07-13 23:37:14 +0300848struct io_defer_entry {
849 struct list_head list;
850 struct io_kiocb *req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +0300851 u32 seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +0300852};
853
Jens Axboed3656342019-12-18 09:50:26 -0700854struct io_op_def {
Jens Axboed3656342019-12-18 09:50:26 -0700855 /* needs req->file assigned */
856 unsigned needs_file : 1;
Jens Axboed3656342019-12-18 09:50:26 -0700857 /* hash wq insertion if file is a regular file */
858 unsigned hash_reg_file : 1;
859 /* unbound wq insertion if file is a non-regular file */
860 unsigned unbound_nonreg_file : 1;
Jens Axboe66f4af92020-01-16 15:36:52 -0700861 /* opcode is not supported by this kernel */
862 unsigned not_supported : 1;
Jens Axboe8a727582020-02-20 09:59:44 -0700863 /* set if opcode supports polled "wait" */
864 unsigned pollin : 1;
865 unsigned pollout : 1;
Jens Axboebcda7ba2020-02-23 16:42:51 -0700866 /* op supports buffer selection */
867 unsigned buffer_select : 1;
Pavel Begunkov26f05052021-02-28 22:35:18 +0000868 /* do prep async if is going to be punted */
869 unsigned needs_async_setup : 1;
Jens Axboe27926b62020-10-28 09:33:23 -0600870 /* should block plug */
871 unsigned plug : 1;
Jens Axboee8c2bc12020-08-15 18:44:09 -0700872 /* size of async data needed, if any */
873 unsigned short async_size;
Jens Axboed3656342019-12-18 09:50:26 -0700874};
875
Jens Axboe09186822020-10-13 15:01:40 -0600876static const struct io_op_def io_op_defs[] = {
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300877 [IORING_OP_NOP] = {},
878 [IORING_OP_READV] = {
Jens Axboed3656342019-12-18 09:50:26 -0700879 .needs_file = 1,
880 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700881 .pollin = 1,
Jens Axboe4d954c22020-02-27 07:31:19 -0700882 .buffer_select = 1,
Pavel Begunkov26f05052021-02-28 22:35:18 +0000883 .needs_async_setup = 1,
Jens Axboe27926b62020-10-28 09:33:23 -0600884 .plug = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700885 .async_size = sizeof(struct io_async_rw),
Jens Axboed3656342019-12-18 09:50:26 -0700886 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300887 [IORING_OP_WRITEV] = {
Jens Axboed3656342019-12-18 09:50:26 -0700888 .needs_file = 1,
889 .hash_reg_file = 1,
890 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700891 .pollout = 1,
Pavel Begunkov26f05052021-02-28 22:35:18 +0000892 .needs_async_setup = 1,
Jens Axboe27926b62020-10-28 09:33:23 -0600893 .plug = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700894 .async_size = sizeof(struct io_async_rw),
Jens Axboed3656342019-12-18 09:50:26 -0700895 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300896 [IORING_OP_FSYNC] = {
Jens Axboed3656342019-12-18 09:50:26 -0700897 .needs_file = 1,
898 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300899 [IORING_OP_READ_FIXED] = {
Jens Axboed3656342019-12-18 09:50:26 -0700900 .needs_file = 1,
901 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700902 .pollin = 1,
Jens Axboe27926b62020-10-28 09:33:23 -0600903 .plug = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700904 .async_size = sizeof(struct io_async_rw),
Jens Axboed3656342019-12-18 09:50:26 -0700905 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300906 [IORING_OP_WRITE_FIXED] = {
Jens Axboed3656342019-12-18 09:50:26 -0700907 .needs_file = 1,
908 .hash_reg_file = 1,
909 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700910 .pollout = 1,
Jens Axboe27926b62020-10-28 09:33:23 -0600911 .plug = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700912 .async_size = sizeof(struct io_async_rw),
Jens Axboed3656342019-12-18 09:50:26 -0700913 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300914 [IORING_OP_POLL_ADD] = {
Jens Axboed3656342019-12-18 09:50:26 -0700915 .needs_file = 1,
916 .unbound_nonreg_file = 1,
917 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300918 [IORING_OP_POLL_REMOVE] = {},
919 [IORING_OP_SYNC_FILE_RANGE] = {
Jens Axboed3656342019-12-18 09:50:26 -0700920 .needs_file = 1,
921 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300922 [IORING_OP_SENDMSG] = {
Jens Axboed3656342019-12-18 09:50:26 -0700923 .needs_file = 1,
924 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700925 .pollout = 1,
Pavel Begunkov26f05052021-02-28 22:35:18 +0000926 .needs_async_setup = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700927 .async_size = sizeof(struct io_async_msghdr),
Jens Axboed3656342019-12-18 09:50:26 -0700928 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300929 [IORING_OP_RECVMSG] = {
Jens Axboed3656342019-12-18 09:50:26 -0700930 .needs_file = 1,
931 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700932 .pollin = 1,
Jens Axboe52de1fe2020-02-27 10:15:42 -0700933 .buffer_select = 1,
Pavel Begunkov26f05052021-02-28 22:35:18 +0000934 .needs_async_setup = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700935 .async_size = sizeof(struct io_async_msghdr),
Jens Axboed3656342019-12-18 09:50:26 -0700936 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300937 [IORING_OP_TIMEOUT] = {
Jens Axboee8c2bc12020-08-15 18:44:09 -0700938 .async_size = sizeof(struct io_timeout_data),
Jens Axboed3656342019-12-18 09:50:26 -0700939 },
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +0000940 [IORING_OP_TIMEOUT_REMOVE] = {
941 /* used by timeout updates' prep() */
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +0000942 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300943 [IORING_OP_ACCEPT] = {
Jens Axboed3656342019-12-18 09:50:26 -0700944 .needs_file = 1,
945 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700946 .pollin = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700947 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300948 [IORING_OP_ASYNC_CANCEL] = {},
949 [IORING_OP_LINK_TIMEOUT] = {
Jens Axboee8c2bc12020-08-15 18:44:09 -0700950 .async_size = sizeof(struct io_timeout_data),
Jens Axboed3656342019-12-18 09:50:26 -0700951 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300952 [IORING_OP_CONNECT] = {
Jens Axboed3656342019-12-18 09:50:26 -0700953 .needs_file = 1,
954 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700955 .pollout = 1,
Pavel Begunkov26f05052021-02-28 22:35:18 +0000956 .needs_async_setup = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700957 .async_size = sizeof(struct io_async_connect),
Jens Axboed3656342019-12-18 09:50:26 -0700958 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300959 [IORING_OP_FALLOCATE] = {
Jens Axboed3656342019-12-18 09:50:26 -0700960 .needs_file = 1,
961 },
Jens Axboe44526be2021-02-15 13:32:18 -0700962 [IORING_OP_OPENAT] = {},
963 [IORING_OP_CLOSE] = {},
964 [IORING_OP_FILES_UPDATE] = {},
965 [IORING_OP_STATX] = {},
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300966 [IORING_OP_READ] = {
Jens Axboe3a6820f2019-12-22 15:19:35 -0700967 .needs_file = 1,
968 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700969 .pollin = 1,
Jens Axboebcda7ba2020-02-23 16:42:51 -0700970 .buffer_select = 1,
Jens Axboe27926b62020-10-28 09:33:23 -0600971 .plug = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700972 .async_size = sizeof(struct io_async_rw),
Jens Axboe3a6820f2019-12-22 15:19:35 -0700973 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300974 [IORING_OP_WRITE] = {
Jens Axboe3a6820f2019-12-22 15:19:35 -0700975 .needs_file = 1,
976 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700977 .pollout = 1,
Jens Axboe27926b62020-10-28 09:33:23 -0600978 .plug = 1,
Jens Axboee8c2bc12020-08-15 18:44:09 -0700979 .async_size = sizeof(struct io_async_rw),
Jens Axboe3a6820f2019-12-22 15:19:35 -0700980 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300981 [IORING_OP_FADVISE] = {
Jens Axboe4840e412019-12-25 22:03:45 -0700982 .needs_file = 1,
983 },
Jens Axboe44526be2021-02-15 13:32:18 -0700984 [IORING_OP_MADVISE] = {},
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300985 [IORING_OP_SEND] = {
Jens Axboefddafac2020-01-04 20:19:44 -0700986 .needs_file = 1,
987 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700988 .pollout = 1,
Jens Axboefddafac2020-01-04 20:19:44 -0700989 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300990 [IORING_OP_RECV] = {
Jens Axboefddafac2020-01-04 20:19:44 -0700991 .needs_file = 1,
992 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700993 .pollin = 1,
Jens Axboebcda7ba2020-02-23 16:42:51 -0700994 .buffer_select = 1,
Jens Axboefddafac2020-01-04 20:19:44 -0700995 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300996 [IORING_OP_OPENAT2] = {
Jens Axboecebdb982020-01-08 17:59:24 -0700997 },
Jens Axboe3e4827b2020-01-08 15:18:09 -0700998 [IORING_OP_EPOLL_CTL] = {
999 .unbound_nonreg_file = 1,
Jens Axboe3e4827b2020-01-08 15:18:09 -07001000 },
Pavel Begunkov7d67af22020-02-24 11:32:45 +03001001 [IORING_OP_SPLICE] = {
1002 .needs_file = 1,
1003 .hash_reg_file = 1,
1004 .unbound_nonreg_file = 1,
Jens Axboeddf0322d2020-02-23 16:41:33 -07001005 },
1006 [IORING_OP_PROVIDE_BUFFERS] = {},
Jens Axboe067524e2020-03-02 16:32:28 -07001007 [IORING_OP_REMOVE_BUFFERS] = {},
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03001008 [IORING_OP_TEE] = {
1009 .needs_file = 1,
1010 .hash_reg_file = 1,
1011 .unbound_nonreg_file = 1,
1012 },
Jens Axboe36f4fa62020-09-05 11:14:22 -06001013 [IORING_OP_SHUTDOWN] = {
1014 .needs_file = 1,
1015 },
Jens Axboe44526be2021-02-15 13:32:18 -07001016 [IORING_OP_RENAMEAT] = {},
1017 [IORING_OP_UNLINKAT] = {},
Jens Axboed3656342019-12-18 09:50:26 -07001018};
1019
Pavel Begunkov7a612352021-03-09 00:37:59 +00001020static bool io_disarm_next(struct io_kiocb *req);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00001021static void io_uring_del_task_file(unsigned long index);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00001022static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
1023 struct task_struct *task,
1024 struct files_struct *files);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07001025static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01001026static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00001027
Pavel Begunkovff6421642021-04-11 01:46:32 +01001028static bool io_cqring_fill_event(struct io_kiocb *req, long res, unsigned cflags);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001029static void io_put_req(struct io_kiocb *req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001030static void io_put_req_deferred(struct io_kiocb *req, int nr);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001031static void io_dismantle_req(struct io_kiocb *req);
1032static void io_put_task(struct task_struct *task, int nr);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001033static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
1034static void io_queue_linked_timeout(struct io_kiocb *req);
Jens Axboe05f3fb32019-12-09 11:22:50 -07001035static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001036 struct io_uring_rsrc_update *ip,
Jens Axboe05f3fb32019-12-09 11:22:50 -07001037 unsigned nr_args);
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001038static void io_clean_op(struct io_kiocb *req);
Pavel Begunkov8371adf2020-10-10 18:34:08 +01001039static struct file *io_file_get(struct io_submit_state *state,
1040 struct io_kiocb *req, int fd, bool fixed);
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00001041static void __io_queue_sqe(struct io_kiocb *req);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001042static void io_rsrc_put_work(struct work_struct *work);
Jens Axboede0617e2019-04-06 21:51:27 -06001043
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001044static void io_req_task_queue(struct io_kiocb *req);
Jens Axboe65453d12021-02-10 00:03:21 +00001045static void io_submit_flush_completions(struct io_comp_state *cs,
1046 struct io_ring_ctx *ctx);
Jens Axboe50826202021-02-23 09:02:26 -07001047static bool io_poll_remove_waitqs(struct io_kiocb *req);
Pavel Begunkov179ae0d2021-02-28 22:35:20 +00001048static int io_req_prep_async(struct io_kiocb *req);
Jens Axboe9a56a232019-01-09 09:06:50 -07001049
Jens Axboe2b188cc2019-01-07 10:46:33 -07001050static struct kmem_cache *req_cachep;
1051
Jens Axboe09186822020-10-13 15:01:40 -06001052static const struct file_operations io_uring_fops;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001053
1054struct sock *io_uring_get_socket(struct file *file)
1055{
1056#if defined(CONFIG_UNIX)
1057 if (file->f_op == &io_uring_fops) {
1058 struct io_ring_ctx *ctx = file->private_data;
1059
1060 return ctx->ring_sock->sk;
1061 }
1062#endif
1063 return NULL;
1064}
1065EXPORT_SYMBOL(io_uring_get_socket);
1066
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001067#define io_for_each_link(pos, head) \
1068 for (pos = (head); pos; pos = pos->link)
1069
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01001070static inline void io_req_set_rsrc_node(struct io_kiocb *req)
Jens Axboec40f6372020-06-25 15:39:59 -06001071{
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001072 struct io_ring_ctx *ctx = req->ctx;
1073
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001074 if (!req->fixed_rsrc_refs) {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01001075 req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001076 percpu_ref_get(req->fixed_rsrc_refs);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00001077 }
1078}
1079
Pavel Begunkovf70865d2021-04-11 01:46:40 +01001080static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1081{
1082 bool got = percpu_ref_tryget(ref);
1083
1084 /* already at zero, wait for ->release() */
1085 if (!got)
1086 wait_for_completion(compl);
1087 percpu_ref_resurrect(ref);
1088 if (got)
1089 percpu_ref_put(ref);
1090}
1091
Pavel Begunkov08d23632020-11-06 13:00:22 +00001092static bool io_match_task(struct io_kiocb *head,
1093 struct task_struct *task,
1094 struct files_struct *files)
1095{
1096 struct io_kiocb *req;
1097
Pavel Begunkov68207682021-03-22 01:58:25 +00001098 if (task && head->task != task)
Pavel Begunkov08d23632020-11-06 13:00:22 +00001099 return false;
1100 if (!files)
1101 return true;
1102
1103 io_for_each_link(req, head) {
Pavel Begunkovb05a1bc2021-03-04 13:59:24 +00001104 if (req->flags & REQ_F_INFLIGHT)
Jens Axboe02a13672021-01-23 15:49:31 -07001105 return true;
Pavel Begunkov08d23632020-11-06 13:00:22 +00001106 }
1107 return false;
1108}
1109
Jens Axboec40f6372020-06-25 15:39:59 -06001110static inline void req_set_fail_links(struct io_kiocb *req)
1111{
Pavel Begunkove4335ed2021-04-11 01:46:39 +01001112 if (req->flags & REQ_F_LINK)
Jens Axboec40f6372020-06-25 15:39:59 -06001113 req->flags |= REQ_F_FAIL_LINK;
1114}
Jens Axboe4a38aed22020-05-14 17:21:15 -06001115
Jens Axboe2b188cc2019-01-07 10:46:33 -07001116static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1117{
1118 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1119
Jens Axboe0f158b42020-05-14 17:18:39 -06001120 complete(&ctx->ref_comp);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001121}
1122
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001123static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1124{
1125 return !req->timeout.off;
1126}
1127
Jens Axboe2b188cc2019-01-07 10:46:33 -07001128static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1129{
1130 struct io_ring_ctx *ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07001131 int hash_bits;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001132
1133 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1134 if (!ctx)
1135 return NULL;
1136
Jens Axboe78076bb2019-12-04 19:56:40 -07001137 /*
1138 * Use 5 bits less than the max cq entries, that should give us around
1139 * 32 entries per hash list if totally full and uniformly spread.
1140 */
1141 hash_bits = ilog2(p->cq_entries);
1142 hash_bits -= 5;
1143 if (hash_bits <= 0)
1144 hash_bits = 1;
1145 ctx->cancel_hash_bits = hash_bits;
1146 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1147 GFP_KERNEL);
1148 if (!ctx->cancel_hash)
1149 goto err;
1150 __hash_init(ctx->cancel_hash, 1U << hash_bits);
1151
Roman Gushchin21482892019-05-07 10:01:48 -07001152 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -07001153 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1154 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001155
1156 ctx->flags = p->flags;
Jens Axboe90554202020-09-03 12:12:41 -06001157 init_waitqueue_head(&ctx->sqo_sq_wait);
Jens Axboe69fb2132020-09-14 11:16:23 -06001158 INIT_LIST_HEAD(&ctx->sqd_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001159 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001160 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -06001161 init_completion(&ctx->ref_comp);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07001162 xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00001163 xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001164 mutex_init(&ctx->uring_lock);
1165 init_waitqueue_head(&ctx->wait);
1166 spin_lock_init(&ctx->completion_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03001167 INIT_LIST_HEAD(&ctx->iopoll_list);
Jens Axboede0617e2019-04-06 21:51:27 -06001168 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -06001169 INIT_LIST_HEAD(&ctx->timeout_list);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00001170 spin_lock_init(&ctx->rsrc_ref_lock);
1171 INIT_LIST_HEAD(&ctx->rsrc_ref_list);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001172 INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
1173 init_llist_head(&ctx->rsrc_put_llist);
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00001174 INIT_LIST_HEAD(&ctx->tctx_list);
Jens Axboe1b4c3512021-02-10 00:03:19 +00001175 INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001176 INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001177 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -07001178err:
Jens Axboe78076bb2019-12-04 19:56:40 -07001179 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -07001180 kfree(ctx);
1181 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001182}
1183
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001184static bool req_need_defer(struct io_kiocb *req, u32 seq)
Jens Axboede0617e2019-04-06 21:51:27 -06001185{
Jens Axboe2bc99302020-07-09 09:43:27 -06001186 if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1187 struct io_ring_ctx *ctx = req->ctx;
Jackie Liua197f662019-11-08 08:09:12 -07001188
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001189 return seq != ctx->cached_cq_tail
Pavel Begunkov2c3bac6d2020-10-18 10:17:40 +01001190 + READ_ONCE(ctx->cached_cq_overflow);
Jens Axboe2bc99302020-07-09 09:43:27 -06001191 }
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001192
Bob Liu9d858b22019-11-13 18:06:25 +08001193 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001194}
1195
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001196static void io_req_track_inflight(struct io_kiocb *req)
1197{
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001198 if (!(req->flags & REQ_F_INFLIGHT)) {
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001199 req->flags |= REQ_F_INFLIGHT;
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001200 atomic_inc(&current->io_uring->inflight_tracked);
Pavel Begunkovce3d5aa2021-02-01 18:59:55 +00001201 }
1202}
1203
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001204static void io_prep_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001205{
Jens Axboed3656342019-12-18 09:50:26 -07001206 const struct io_op_def *def = &io_op_defs[req->opcode];
Pavel Begunkov23329512020-10-10 18:34:06 +01001207 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe54a91f32019-09-10 09:15:04 -06001208
Jens Axboe003e8dc2021-03-06 09:22:27 -07001209 if (!req->work.creds)
1210 req->work.creds = get_current_cred();
1211
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001212 req->work.list.next = NULL;
1213 req->work.flags = 0;
Pavel Begunkovfeaadc42020-10-22 16:47:16 +01001214 if (req->flags & REQ_F_FORCE_ASYNC)
1215 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1216
Jens Axboed3656342019-12-18 09:50:26 -07001217 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov23329512020-10-10 18:34:06 +01001218 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001219 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboe4b982bd2021-04-01 08:38:34 -06001220 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
Jens Axboed3656342019-12-18 09:50:26 -07001221 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001222 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001223 }
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001224
1225 switch (req->opcode) {
1226 case IORING_OP_SPLICE:
1227 case IORING_OP_TEE:
Pavel Begunkove1d675d2021-03-22 01:58:29 +00001228 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1229 req->work.flags |= IO_WQ_WORK_UNBOUND;
1230 break;
1231 }
Jens Axboe561fb042019-10-24 07:25:42 -06001232}
1233
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001234static void io_prep_async_link(struct io_kiocb *req)
1235{
1236 struct io_kiocb *cur;
1237
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001238 io_for_each_link(cur, req)
1239 io_prep_async_work(cur);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001240}
1241
Pavel Begunkovebf93662021-03-01 18:20:47 +00001242static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001243{
Jackie Liua197f662019-11-08 08:09:12 -07001244 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001245 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001246 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001247
Jens Axboe3bfe6102021-02-16 14:15:30 -07001248 BUG_ON(!tctx);
1249 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001250
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001251 /* init ->work of the whole link before punting */
1252 io_prep_async_link(req);
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001253 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1254 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001255 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001256 if (link)
1257 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001258}
1259
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001260static void io_kill_timeout(struct io_kiocb *req, int status)
Pavel Begunkov8c855882021-04-13 02:58:41 +01001261 __must_hold(&req->ctx->completion_lock)
Jens Axboe5262f562019-09-17 12:26:57 -06001262{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001263 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001264
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001265 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001266 atomic_set(&req->ctx->cq_timeouts,
1267 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001268 list_del_init(&req->timeout.list);
Pavel Begunkovff6421642021-04-11 01:46:32 +01001269 io_cqring_fill_event(req, status, 0);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001270 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001271 }
1272}
1273
Pavel Begunkov04518942020-05-26 20:34:05 +03001274static void __io_queue_deferred(struct io_ring_ctx *ctx)
1275{
1276 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001277 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1278 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001279
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001280 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001281 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001282 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001283 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001284 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001285 } while (!list_empty(&ctx->defer_list));
1286}
1287
Pavel Begunkov360428f2020-05-30 14:54:17 +03001288static void io_flush_timeouts(struct io_ring_ctx *ctx)
1289{
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001290 u32 seq;
1291
1292 if (list_empty(&ctx->timeout_list))
1293 return;
1294
1295 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1296
1297 do {
1298 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001299 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001300 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001301
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001302 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001303 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001304
1305 /*
1306 * Since seq can easily wrap around over time, subtract
1307 * the last seq at which timeouts were flushed before comparing.
1308 * Assuming not more than 2^31-1 events have happened since,
1309 * these subtractions won't have wrapped, so we can check if
1310 * target is in [last_seq, current_seq] by comparing the two.
1311 */
1312 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1313 events_got = seq - ctx->cq_last_tm_flush;
1314 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001315 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001316
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001317 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001318 io_kill_timeout(req, 0);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001319 } while (!list_empty(&ctx->timeout_list));
1320
1321 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001322}
1323
Jens Axboede0617e2019-04-06 21:51:27 -06001324static void io_commit_cqring(struct io_ring_ctx *ctx)
1325{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001326 io_flush_timeouts(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001327
1328 /* order cqe stores with ring update */
1329 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001330
Pavel Begunkov04518942020-05-26 20:34:05 +03001331 if (unlikely(!list_empty(&ctx->defer_list)))
1332 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001333}
1334
Jens Axboe90554202020-09-03 12:12:41 -06001335static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1336{
1337 struct io_rings *r = ctx->rings;
1338
1339 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1340}
1341
Pavel Begunkov888aae22021-01-19 13:32:39 +00001342static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1343{
1344 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1345}
1346
Pavel Begunkov8d133262021-04-11 01:46:33 +01001347static inline struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001348{
Hristo Venev75b28af2019-08-26 17:23:46 +00001349 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001350 unsigned tail;
1351
Stefan Bühler115e12e2019-04-24 23:54:18 +02001352 /*
1353 * writes to the cq entry need to come after reading head; the
1354 * control dependency is enough as we're using WRITE_ONCE to
1355 * fill the cq entry
1356 */
Pavel Begunkov888aae22021-01-19 13:32:39 +00001357 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001358 return NULL;
1359
Pavel Begunkov888aae22021-01-19 13:32:39 +00001360 tail = ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001361 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001362}
1363
Jens Axboef2842ab2020-01-08 11:04:00 -07001364static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1365{
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001366 if (likely(!ctx->cq_ev_fd))
Jens Axboef0b493e2020-02-01 21:30:11 -07001367 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001368 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1369 return false;
Pavel Begunkov44c769d2021-04-11 01:46:31 +01001370 return !ctx->eventfd_async || io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001371}
1372
Jens Axboeb41e9852020-02-17 09:52:41 -07001373static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001374{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001375 /* see waitqueue_active() comment */
1376 smp_mb();
1377
Jens Axboe8c838782019-03-12 15:48:16 -06001378 if (waitqueue_active(&ctx->wait))
1379 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001380 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1381 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001382 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001383 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001384 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001385 wake_up_interruptible(&ctx->cq_wait);
1386 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1387 }
Jens Axboe8c838782019-03-12 15:48:16 -06001388}
1389
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001390static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1391{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001392 /* see waitqueue_active() comment */
1393 smp_mb();
1394
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001395 if (ctx->flags & IORING_SETUP_SQPOLL) {
1396 if (waitqueue_active(&ctx->wait))
1397 wake_up(&ctx->wait);
1398 }
1399 if (io_should_trigger_evfd(ctx))
1400 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001401 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001402 wake_up_interruptible(&ctx->cq_wait);
1403 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1404 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001405}
1406
Jens Axboec4a2ed72019-11-21 21:01:26 -07001407/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001408static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001409{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001410 struct io_rings *rings = ctx->rings;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001411 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001412 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001413
Pavel Begunkove23de152020-12-17 00:24:37 +00001414 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1415 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001416
Jens Axboeb18032b2021-01-24 16:58:56 -07001417 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001418 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001419 while (!list_empty(&ctx->cq_overflow_list)) {
1420 struct io_uring_cqe *cqe = io_get_cqring(ctx);
1421 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001422
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001423 if (!cqe && !force)
1424 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001425 ocqe = list_first_entry(&ctx->cq_overflow_list,
1426 struct io_overflow_cqe, list);
1427 if (cqe)
1428 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1429 else
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001430 WRITE_ONCE(ctx->rings->cq_overflow,
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001431 ++ctx->cached_cq_overflow);
Jens Axboeb18032b2021-01-24 16:58:56 -07001432 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001433 list_del(&ocqe->list);
1434 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001435 }
1436
Pavel Begunkov09e88402020-12-17 00:24:38 +00001437 all_flushed = list_empty(&ctx->cq_overflow_list);
1438 if (all_flushed) {
1439 clear_bit(0, &ctx->sq_check_overflow);
1440 clear_bit(0, &ctx->cq_check_overflow);
1441 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1442 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001443
Jens Axboeb18032b2021-01-24 16:58:56 -07001444 if (posted)
1445 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001446 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001447 if (posted)
1448 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001449 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001450}
1451
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001452static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001453{
Jens Axboeca0a2652021-03-04 17:15:48 -07001454 bool ret = true;
1455
Pavel Begunkov6c503152021-01-04 20:36:36 +00001456 if (test_bit(0, &ctx->cq_check_overflow)) {
1457 /* iopoll syncs against uring_lock, not completion_lock */
1458 if (ctx->flags & IORING_SETUP_IOPOLL)
1459 mutex_lock(&ctx->uring_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001460 ret = __io_cqring_overflow_flush(ctx, force);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001461 if (ctx->flags & IORING_SETUP_IOPOLL)
1462 mutex_unlock(&ctx->uring_lock);
1463 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001464
1465 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001466}
1467
Jens Axboeabc54d62021-02-24 13:32:30 -07001468/*
1469 * Shamelessly stolen from the mm implementation of page reference checking,
1470 * see commit f958d7b528b1 for details.
1471 */
1472#define req_ref_zero_or_close_to_overflow(req) \
1473 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1474
Jens Axboede9b4cc2021-02-24 13:28:27 -07001475static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1476{
Jens Axboeabc54d62021-02-24 13:32:30 -07001477 return atomic_inc_not_zero(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001478}
1479
1480static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
1481{
Jens Axboeabc54d62021-02-24 13:32:30 -07001482 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1483 return atomic_sub_and_test(refs, &req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001484}
1485
1486static inline bool req_ref_put_and_test(struct io_kiocb *req)
1487{
Jens Axboeabc54d62021-02-24 13:32:30 -07001488 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1489 return atomic_dec_and_test(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001490}
1491
1492static inline void req_ref_put(struct io_kiocb *req)
1493{
Jens Axboeabc54d62021-02-24 13:32:30 -07001494 WARN_ON_ONCE(req_ref_put_and_test(req));
Jens Axboede9b4cc2021-02-24 13:28:27 -07001495}
1496
1497static inline void req_ref_get(struct io_kiocb *req)
1498{
Jens Axboeabc54d62021-02-24 13:32:30 -07001499 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1500 atomic_inc(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001501}
1502
Pavel Begunkov8d133262021-04-11 01:46:33 +01001503static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
1504 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001505{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001506 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001507 struct io_overflow_cqe *ocqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001508
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001509 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1510 if (!ocqe) {
1511 /*
1512 * If we're in ring overflow flush mode, or in task cancel mode,
1513 * or cannot allocate an overflow entry, then we need to drop it
1514 * on the floor.
1515 */
1516 WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
1517 return false;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001518 }
Pavel Begunkovcce4b8b2021-04-13 02:58:44 +01001519 if (list_empty(&ctx->cq_overflow_list)) {
1520 set_bit(0, &ctx->sq_check_overflow);
1521 set_bit(0, &ctx->cq_check_overflow);
1522 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
1523 }
1524 ocqe->cqe.user_data = req->user_data;
1525 ocqe->cqe.res = res;
1526 ocqe->cqe.flags = cflags;
1527 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1528 return true;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001529}
1530
Pavel Begunkov8d133262021-04-11 01:46:33 +01001531static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
1532 unsigned int cflags)
1533{
1534 struct io_ring_ctx *ctx = req->ctx;
1535 struct io_uring_cqe *cqe;
1536
1537 trace_io_uring_complete(ctx, req->user_data, res, cflags);
1538
1539 /*
1540 * If we can't get a cq entry, userspace overflowed the
1541 * submission (by quite a lot). Increment the overflow count in
1542 * the ring.
1543 */
1544 cqe = io_get_cqring(ctx);
1545 if (likely(cqe)) {
1546 WRITE_ONCE(cqe->user_data, req->user_data);
1547 WRITE_ONCE(cqe->res, res);
1548 WRITE_ONCE(cqe->flags, cflags);
1549 return true;
1550 }
1551 return io_cqring_event_overflow(req, res, cflags);
1552}
1553
1554/* not as hot to bloat with inlining */
1555static noinline bool io_cqring_fill_event(struct io_kiocb *req, long res,
1556 unsigned int cflags)
1557{
1558 return __io_cqring_fill_event(req, res, cflags);
1559}
1560
Pavel Begunkov7a612352021-03-09 00:37:59 +00001561static void io_req_complete_post(struct io_kiocb *req, long res,
1562 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001563{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001564 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001565 unsigned long flags;
1566
1567 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov8d133262021-04-11 01:46:33 +01001568 __io_cqring_fill_event(req, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001569 /*
1570 * If we're the last reference to this request, add to our locked
1571 * free_list cache.
1572 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001573 if (req_ref_put_and_test(req)) {
Jens Axboec7dae4b2021-02-09 19:53:37 -07001574 struct io_comp_state *cs = &ctx->submit_state.comp;
1575
Pavel Begunkov7a612352021-03-09 00:37:59 +00001576 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1577 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
1578 io_disarm_next(req);
1579 if (req->link) {
1580 io_req_task_queue(req->link);
1581 req->link = NULL;
1582 }
1583 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001584 io_dismantle_req(req);
1585 io_put_task(req->task, 1);
1586 list_add(&req->compl.list, &cs->locked_free_list);
1587 cs->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001588 } else {
1589 if (!percpu_ref_tryget(&ctx->refs))
1590 req = NULL;
1591 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001592 io_commit_cqring(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001593 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001594
Pavel Begunkov180f8292021-03-14 20:57:09 +00001595 if (req) {
1596 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001597 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001598 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001599}
1600
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001601static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001602 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001603{
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001604 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
1605 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001606 req->result = res;
1607 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001608 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001609}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001610
Pavel Begunkov889fca72021-02-10 00:03:09 +00001611static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1612 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001613{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001614 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1615 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001616 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001617 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001618}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001619
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001620static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001621{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001622 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001623}
1624
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001625static void io_req_complete_failed(struct io_kiocb *req, long res)
1626{
1627 req_set_fail_links(req);
1628 io_put_req(req);
1629 io_req_complete_post(req, res, 0);
1630}
1631
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001632static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1633 struct io_comp_state *cs)
1634{
1635 spin_lock_irq(&ctx->completion_lock);
1636 list_splice_init(&cs->locked_free_list, &cs->free_list);
1637 cs->locked_free_nr = 0;
1638 spin_unlock_irq(&ctx->completion_lock);
1639}
1640
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001641/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001642static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001643{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001644 struct io_submit_state *state = &ctx->submit_state;
1645 struct io_comp_state *cs = &state->comp;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001646 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001647
Jens Axboec7dae4b2021-02-09 19:53:37 -07001648 /*
1649 * If we have more than a batch's worth of requests in our IRQ side
1650 * locked cache, grab the lock and move them over to our submission
1651 * side cache.
1652 */
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001653 if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH)
1654 io_flush_cached_locked_reqs(ctx, cs);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001655
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001656 nr = state->free_reqs;
Jens Axboec7dae4b2021-02-09 19:53:37 -07001657 while (!list_empty(&cs->free_list)) {
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001658 struct io_kiocb *req = list_first_entry(&cs->free_list,
1659 struct io_kiocb, compl.list);
1660
Jens Axboe2b188cc2019-01-07 10:46:33 -07001661 list_del(&req->compl.list);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001662 state->reqs[nr++] = req;
1663 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001664 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001665 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001666
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001667 state->free_reqs = nr;
1668 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001669}
1670
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001671static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001672{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001673 struct io_submit_state *state = &ctx->submit_state;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001674
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001675 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
Jens Axboe2b188cc2019-01-07 10:46:33 -07001676
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001677 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001678 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001679 int ret;
1680
Jens Axboec7dae4b2021-02-09 19:53:37 -07001681 if (io_flush_cached_reqs(ctx))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001682 goto got_req;
1683
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001684 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1685 state->reqs);
Jens Axboefd6fab22019-03-14 16:30:06 -06001686
1687 /*
1688 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1689 * retry single alloc to be on the safe side.
1690 */
1691 if (unlikely(ret <= 0)) {
1692 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1693 if (!state->reqs[0])
Pavel Begunkov3893f392021-02-10 00:03:15 +00001694 return NULL;
Jens Axboefd6fab22019-03-14 16:30:06 -06001695 ret = 1;
1696 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001697 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001698 }
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001699got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001700 state->free_reqs--;
1701 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001702}
1703
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001704static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001705{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001706 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001707 fput(file);
1708}
1709
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001710static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001711{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001712 unsigned int flags = req->flags;
1713
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001714 if (!(flags & REQ_F_FIXED_FILE))
1715 io_put_file(req->file);
Pavel Begunkov094bae42021-03-19 17:22:42 +00001716 if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
1717 REQ_F_INFLIGHT)) {
1718 io_clean_op(req);
1719
1720 if (req->flags & REQ_F_INFLIGHT) {
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001721 struct io_uring_task *tctx = req->task->io_uring;
Pavel Begunkov094bae42021-03-19 17:22:42 +00001722
Pavel Begunkovb303fe22021-04-11 01:46:26 +01001723 atomic_dec(&tctx->inflight_tracked);
Pavel Begunkov094bae42021-03-19 17:22:42 +00001724 req->flags &= ~REQ_F_INFLIGHT;
1725 }
1726 }
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001727 if (req->fixed_rsrc_refs)
1728 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov094bae42021-03-19 17:22:42 +00001729 if (req->async_data)
1730 kfree(req->async_data);
Jens Axboe003e8dc2021-03-06 09:22:27 -07001731 if (req->work.creds) {
1732 put_cred(req->work.creds);
1733 req->work.creds = NULL;
1734 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001735}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001736
Pavel Begunkovb23fcf42021-03-01 18:20:48 +00001737/* must to be called somewhat shortly after putting a request */
Pavel Begunkov7c660732021-01-25 11:42:21 +00001738static inline void io_put_task(struct task_struct *task, int nr)
1739{
1740 struct io_uring_task *tctx = task->io_uring;
1741
1742 percpu_counter_sub(&tctx->inflight, nr);
1743 if (unlikely(atomic_read(&tctx->in_idle)))
1744 wake_up(&tctx->wait);
1745 put_task_struct_many(task, nr);
1746}
1747
Pavel Begunkov216578e2020-10-13 09:44:00 +01001748static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001749{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001750 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001751
Pavel Begunkov216578e2020-10-13 09:44:00 +01001752 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001753 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001754
Pavel Begunkov3893f392021-02-10 00:03:15 +00001755 kmem_cache_free(req_cachep, req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001756 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001757}
1758
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001759static inline void io_remove_next_linked(struct io_kiocb *req)
1760{
1761 struct io_kiocb *nxt = req->link;
1762
1763 req->link = nxt->link;
1764 nxt->link = NULL;
1765}
1766
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001767static bool io_kill_linked_timeout(struct io_kiocb *req)
1768 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001769{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001770 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001771
Pavel Begunkov900fad42020-10-19 16:39:16 +01001772 /*
1773 * Can happen if a linked timeout fired and link had been like
1774 * req -> link t-out -> link t-out [-> ...]
1775 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001776 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1777 struct io_timeout_data *io = link->async_data;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001778
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001779 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001780 link->timeout.head = NULL;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01001781 if (hrtimer_try_to_cancel(&io->timer) != -1) {
Pavel Begunkovff6421642021-04-11 01:46:32 +01001782 io_cqring_fill_event(link, -ECANCELED, 0);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001783 io_put_req_deferred(link, 1);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001784 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001785 }
1786 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001787 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001788}
1789
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001790static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001791 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001792{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001793 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001794
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001795 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001796 while (link) {
1797 nxt = link->link;
1798 link->link = NULL;
1799
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001800 trace_io_uring_fail_link(req, link);
Pavel Begunkovff6421642021-04-11 01:46:32 +01001801 io_cqring_fill_event(link, -ECANCELED, 0);
Jens Axboe1575f212021-02-27 15:20:49 -07001802 io_put_req_deferred(link, 2);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001803 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001804 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001805}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001806
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001807static bool io_disarm_next(struct io_kiocb *req)
1808 __must_hold(&req->ctx->completion_lock)
1809{
1810 bool posted = false;
1811
1812 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1813 posted = io_kill_linked_timeout(req);
Pavel Begunkove4335ed2021-04-11 01:46:39 +01001814 if (unlikely((req->flags & REQ_F_FAIL_LINK) &&
1815 !(req->flags & REQ_F_HARDLINK))) {
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001816 posted |= (req->link != NULL);
1817 io_fail_links(req);
1818 }
1819 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001820}
1821
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001822static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001823{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001824 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001825
Jens Axboe9e645e112019-05-10 16:07:28 -06001826 /*
1827 * If LINK is set, we have dependent requests in this chain. If we
1828 * didn't fail this request, queue the first one up, moving any other
1829 * dependencies to the next request. In case of failure, fail the rest
1830 * of the chain.
1831 */
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001832 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
1833 struct io_ring_ctx *ctx = req->ctx;
1834 unsigned long flags;
1835 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001836
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001837 spin_lock_irqsave(&ctx->completion_lock, flags);
1838 posted = io_disarm_next(req);
1839 if (posted)
1840 io_commit_cqring(req->ctx);
1841 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1842 if (posted)
1843 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001844 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001845 nxt = req->link;
1846 req->link = NULL;
1847 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001848}
Jens Axboe2665abf2019-11-05 12:40:47 -07001849
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001850static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001851{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001852 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001853 return NULL;
1854 return __io_req_find_next(req);
1855}
1856
Pavel Begunkov2c323952021-02-28 22:04:53 +00001857static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1858{
1859 if (!ctx)
1860 return;
1861 if (ctx->submit_state.comp.nr) {
1862 mutex_lock(&ctx->uring_lock);
1863 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
1864 mutex_unlock(&ctx->uring_lock);
1865 }
1866 percpu_ref_put(&ctx->refs);
1867}
1868
Jens Axboe7cbf1722021-02-10 00:03:20 +00001869static bool __tctx_task_work(struct io_uring_task *tctx)
1870{
Jens Axboe65453d12021-02-10 00:03:21 +00001871 struct io_ring_ctx *ctx = NULL;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001872 struct io_wq_work_list list;
1873 struct io_wq_work_node *node;
1874
1875 if (wq_list_empty(&tctx->task_list))
1876 return false;
1877
Jens Axboe0b81e802021-02-16 10:33:53 -07001878 spin_lock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001879 list = tctx->task_list;
1880 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001881 spin_unlock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001882
1883 node = list.first;
1884 while (node) {
1885 struct io_wq_work_node *next = node->next;
1886 struct io_kiocb *req;
1887
1888 req = container_of(node, struct io_kiocb, io_task_work.node);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001889 if (req->ctx != ctx) {
1890 ctx_flush_and_put(ctx);
1891 ctx = req->ctx;
1892 percpu_ref_get(&ctx->refs);
1893 }
1894
Jens Axboe7cbf1722021-02-10 00:03:20 +00001895 req->task_work.func(&req->task_work);
1896 node = next;
Jens Axboe65453d12021-02-10 00:03:21 +00001897 }
1898
Pavel Begunkov2c323952021-02-28 22:04:53 +00001899 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001900 return list.first != NULL;
1901}
1902
1903static void tctx_task_work(struct callback_head *cb)
1904{
1905 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
1906
Jens Axboe1d5f3602021-02-26 14:54:16 -07001907 clear_bit(0, &tctx->task_state);
1908
Jens Axboe7cbf1722021-02-10 00:03:20 +00001909 while (__tctx_task_work(tctx))
1910 cond_resched();
Jens Axboe7cbf1722021-02-10 00:03:20 +00001911}
1912
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001913static int io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00001914{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001915 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001916 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001917 enum task_work_notify_mode notify;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001918 struct io_wq_work_node *node, *prev;
Jens Axboe0b81e802021-02-16 10:33:53 -07001919 unsigned long flags;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001920 int ret = 0;
1921
1922 if (unlikely(tsk->flags & PF_EXITING))
1923 return -ESRCH;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001924
1925 WARN_ON_ONCE(!tctx);
1926
Jens Axboe0b81e802021-02-16 10:33:53 -07001927 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001928 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001929 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001930
1931 /* task_work already pending, we're done */
1932 if (test_bit(0, &tctx->task_state) ||
1933 test_and_set_bit(0, &tctx->task_state))
1934 return 0;
1935
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001936 /*
1937 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1938 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1939 * processing task_work. There's no reliable way to tell if TWA_RESUME
1940 * will do the job.
1941 */
1942 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
1943
1944 if (!task_work_add(tsk, &tctx->task_work, notify)) {
1945 wake_up_process(tsk);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001946 return 0;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001947 }
Jens Axboe7cbf1722021-02-10 00:03:20 +00001948
1949 /*
1950 * Slow path - we failed, find and delete work. if the work is not
1951 * in the list, it got run and we're fine.
1952 */
Jens Axboe0b81e802021-02-16 10:33:53 -07001953 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001954 wq_list_for_each(node, prev, &tctx->task_list) {
1955 if (&req->io_task_work.node == node) {
1956 wq_list_del(&tctx->task_list, node, prev);
1957 ret = 1;
1958 break;
1959 }
1960 }
Jens Axboe0b81e802021-02-16 10:33:53 -07001961 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001962 clear_bit(0, &tctx->task_state);
1963 return ret;
1964}
1965
Pavel Begunkov9b465712021-03-15 14:23:07 +00001966static bool io_run_task_work_head(struct callback_head **work_head)
1967{
1968 struct callback_head *work, *next;
1969 bool executed = false;
1970
1971 do {
1972 work = xchg(work_head, NULL);
1973 if (!work)
1974 break;
1975
1976 do {
1977 next = work->next;
1978 work->func(work);
1979 work = next;
1980 cond_resched();
1981 } while (work);
1982 executed = true;
1983 } while (1);
1984
1985 return executed;
1986}
1987
1988static void io_task_work_add_head(struct callback_head **work_head,
1989 struct callback_head *task_work)
1990{
1991 struct callback_head *head;
1992
1993 do {
1994 head = READ_ONCE(*work_head);
1995 task_work->next = head;
1996 } while (cmpxchg(work_head, head, task_work) != head);
1997}
1998
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001999static void io_req_task_work_add_fallback(struct io_kiocb *req,
Jens Axboe7cbf1722021-02-10 00:03:20 +00002000 task_work_func_t cb)
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002001{
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002002 init_task_work(&req->task_work, cb);
Pavel Begunkov9b465712021-03-15 14:23:07 +00002003 io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002004}
2005
Jens Axboec40f6372020-06-25 15:39:59 -06002006static void io_req_task_cancel(struct callback_head *cb)
2007{
2008 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002009 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002010
Pavel Begunkove83acd72021-02-28 22:35:09 +00002011 /* ctx is guaranteed to stay alive while we hold uring_lock */
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002012 mutex_lock(&ctx->uring_lock);
Pavel Begunkov25935532021-03-19 17:22:40 +00002013 io_req_complete_failed(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002014 mutex_unlock(&ctx->uring_lock);
Jens Axboec40f6372020-06-25 15:39:59 -06002015}
2016
2017static void __io_req_task_submit(struct io_kiocb *req)
2018{
2019 struct io_ring_ctx *ctx = req->ctx;
2020
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00002021 /* ctx stays valid until unlock, even if we drop all ours ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002022 mutex_lock(&ctx->uring_lock);
Pavel Begunkov70aacfe2021-03-01 13:02:15 +00002023 if (!(current->flags & PF_EXITING) && !current->in_execve)
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002024 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002025 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002026 io_req_complete_failed(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002027 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002028}
2029
Jens Axboec40f6372020-06-25 15:39:59 -06002030static void io_req_task_submit(struct callback_head *cb)
2031{
2032 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2033
2034 __io_req_task_submit(req);
2035}
2036
Pavel Begunkova3df76982021-02-18 22:32:52 +00002037static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2038{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002039 req->result = ret;
2040 req->task_work.func = io_req_task_cancel;
2041
2042 if (unlikely(io_req_task_work_add(req)))
2043 io_req_task_work_add_fallback(req, io_req_task_cancel);
2044}
2045
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002046static void io_req_task_queue(struct io_kiocb *req)
2047{
2048 req->task_work.func = io_req_task_submit;
2049
2050 if (unlikely(io_req_task_work_add(req)))
2051 io_req_task_queue_fail(req, -ECANCELED);
2052}
2053
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002054static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002055{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002056 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002057
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002058 if (nxt)
2059 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002060}
2061
Jens Axboe9e645e112019-05-10 16:07:28 -06002062static void io_free_req(struct io_kiocb *req)
2063{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002064 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002065 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002066}
2067
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002068struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002069 struct task_struct *task;
2070 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002071 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002072};
2073
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002074static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002075{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002076 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002077 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002078 rb->task = NULL;
2079}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002080
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002081static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2082 struct req_batch *rb)
2083{
Pavel Begunkov6e833d52021-02-11 18:28:20 +00002084 if (rb->task)
Pavel Begunkov7c660732021-01-25 11:42:21 +00002085 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002086 if (rb->ctx_refs)
2087 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002088}
2089
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002090static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2091 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002092{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002093 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002094 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002095
Jens Axboee3bc8e92020-09-24 08:45:57 -06002096 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002097 if (rb->task)
2098 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002099 rb->task = req->task;
2100 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002101 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002102 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002103 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002104
Pavel Begunkovbd759042021-02-12 03:23:50 +00002105 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002106 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002107 else
2108 list_add(&req->compl.list, &state->comp.free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002109}
2110
Pavel Begunkov905c1722021-02-10 00:03:14 +00002111static void io_submit_flush_completions(struct io_comp_state *cs,
2112 struct io_ring_ctx *ctx)
2113{
2114 int i, nr = cs->nr;
2115 struct io_kiocb *req;
2116 struct req_batch rb;
2117
2118 io_init_req_batch(&rb);
2119 spin_lock_irq(&ctx->completion_lock);
2120 for (i = 0; i < nr; i++) {
2121 req = cs->reqs[i];
Pavel Begunkov8d133262021-04-11 01:46:33 +01002122 __io_cqring_fill_event(req, req->result, req->compl.cflags);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002123 }
2124 io_commit_cqring(ctx);
2125 spin_unlock_irq(&ctx->completion_lock);
2126
2127 io_cqring_ev_posted(ctx);
2128 for (i = 0; i < nr; i++) {
2129 req = cs->reqs[i];
2130
2131 /* submission and completion refs */
Jens Axboede9b4cc2021-02-24 13:28:27 -07002132 if (req_ref_sub_and_test(req, 2))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002133 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002134 }
2135
2136 io_req_free_batch_finish(ctx, &rb);
2137 cs->nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002138}
2139
Jens Axboeba816ad2019-09-28 11:36:45 -06002140/*
2141 * Drop reference to request, return next in chain (if there is one) if this
2142 * was the last reference to this request.
2143 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002144static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002145{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002146 struct io_kiocb *nxt = NULL;
2147
Jens Axboede9b4cc2021-02-24 13:28:27 -07002148 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002149 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002150 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002151 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002152 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002153}
2154
Pavel Begunkov0d850352021-03-19 17:22:37 +00002155static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002156{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002157 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002158 io_free_req(req);
2159}
2160
Pavel Begunkov216578e2020-10-13 09:44:00 +01002161static void io_put_req_deferred_cb(struct callback_head *cb)
2162{
2163 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2164
2165 io_free_req(req);
2166}
2167
2168static void io_free_req_deferred(struct io_kiocb *req)
2169{
Jens Axboe7cbf1722021-02-10 00:03:20 +00002170 req->task_work.func = io_put_req_deferred_cb;
Pavel Begunkova05432f2021-03-19 17:22:38 +00002171 if (unlikely(io_req_task_work_add(req)))
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002172 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002173}
2174
2175static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2176{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002177 if (req_ref_sub_and_test(req, refs))
Pavel Begunkov216578e2020-10-13 09:44:00 +01002178 io_free_req_deferred(req);
2179}
2180
Pavel Begunkov6c503152021-01-04 20:36:36 +00002181static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002182{
2183 /* See comment at the top of this file */
2184 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002185 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002186}
2187
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002188static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2189{
2190 struct io_rings *rings = ctx->rings;
2191
2192 /* make sure SQ entry isn't read before tail */
2193 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2194}
2195
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002196static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002197{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002198 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002199
Jens Axboebcda7ba2020-02-23 16:42:51 -07002200 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2201 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002202 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002203 kfree(kbuf);
2204 return cflags;
2205}
2206
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002207static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2208{
2209 struct io_buffer *kbuf;
2210
2211 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2212 return io_put_kbuf(req, kbuf);
2213}
2214
Jens Axboe4c6e2772020-07-01 11:29:10 -06002215static inline bool io_run_task_work(void)
2216{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002217 /*
2218 * Not safe to run on exiting task, and the task_work handling will
2219 * not add work to such a task.
2220 */
2221 if (unlikely(current->flags & PF_EXITING))
2222 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002223 if (current->task_works) {
2224 __set_current_state(TASK_RUNNING);
2225 task_work_run();
2226 return true;
2227 }
2228
2229 return false;
2230}
2231
Jens Axboedef596e2019-01-09 08:59:42 -07002232/*
2233 * Find and free completed poll iocbs
2234 */
2235static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2236 struct list_head *done)
2237{
Jens Axboe8237e042019-12-28 10:48:22 -07002238 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002239 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002240
2241 /* order with ->result store in io_complete_rw_iopoll() */
2242 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002243
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002244 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002245 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002246 int cflags = 0;
2247
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002248 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002249 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002250
Pavel Begunkov8c130822021-03-22 01:58:32 +00002251 if (READ_ONCE(req->result) == -EAGAIN &&
2252 !(req->flags & REQ_F_DONT_REISSUE)) {
Pavel Begunkovf1613402021-02-11 18:28:21 +00002253 req->iopoll_completed = 0;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002254 req_ref_get(req);
2255 io_queue_async_work(req);
2256 continue;
Pavel Begunkovf1613402021-02-11 18:28:21 +00002257 }
2258
Jens Axboebcda7ba2020-02-23 16:42:51 -07002259 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002260 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002261
Pavel Begunkov8d133262021-04-11 01:46:33 +01002262 __io_cqring_fill_event(req, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002263 (*nr_events)++;
2264
Jens Axboede9b4cc2021-02-24 13:28:27 -07002265 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002266 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002267 }
Jens Axboedef596e2019-01-09 08:59:42 -07002268
Jens Axboe09bb8392019-03-13 12:39:28 -06002269 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002270 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002271 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002272}
2273
Jens Axboedef596e2019-01-09 08:59:42 -07002274static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2275 long min)
2276{
2277 struct io_kiocb *req, *tmp;
2278 LIST_HEAD(done);
2279 bool spin;
2280 int ret;
2281
2282 /*
2283 * Only spin for completions if we don't have multiple devices hanging
2284 * off our complete list, and we're under the requested amount.
2285 */
2286 spin = !ctx->poll_multi_file && *nr_events < min;
2287
2288 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002289 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002290 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002291
2292 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002293 * Move completed and retryable entries to our local lists.
2294 * If we find a request that requires polling, break out
2295 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002296 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002297 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002298 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002299 continue;
2300 }
2301 if (!list_empty(&done))
2302 break;
2303
2304 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2305 if (ret < 0)
2306 break;
2307
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002308 /* iopoll may have completed current req */
2309 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002310 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002311
Jens Axboedef596e2019-01-09 08:59:42 -07002312 if (ret && spin)
2313 spin = false;
2314 ret = 0;
2315 }
2316
2317 if (!list_empty(&done))
2318 io_iopoll_complete(ctx, nr_events, &done);
2319
2320 return ret;
2321}
2322
2323/*
Jens Axboedef596e2019-01-09 08:59:42 -07002324 * We can't just wait for polled events to come to us, we have to actively
2325 * find and complete them.
2326 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002327static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002328{
2329 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2330 return;
2331
2332 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002333 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002334 unsigned int nr_events = 0;
2335
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002336 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002337
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002338 /* let it sleep and repeat later if can't complete a request */
2339 if (nr_events == 0)
2340 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002341 /*
2342 * Ensure we allow local-to-the-cpu processing to take place,
2343 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002344 * Also let task_work, etc. to progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002345 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002346 if (need_resched()) {
2347 mutex_unlock(&ctx->uring_lock);
2348 cond_resched();
2349 mutex_lock(&ctx->uring_lock);
2350 }
Jens Axboedef596e2019-01-09 08:59:42 -07002351 }
2352 mutex_unlock(&ctx->uring_lock);
2353}
2354
Pavel Begunkov7668b922020-07-07 16:36:21 +03002355static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002356{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002357 unsigned int nr_events = 0;
Pavel Begunkove9979b32021-04-13 02:58:45 +01002358 int ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002359
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002360 /*
2361 * We disallow the app entering submit/complete with polling, but we
2362 * still need to lock the ring to prevent racing with polled issue
2363 * that got punted to a workqueue.
2364 */
2365 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002366 /*
2367 * Don't enter poll loop if we already have events pending.
2368 * If we do, we can potentially be spinning for commands that
2369 * already triggered a CQE (eg in error).
2370 */
2371 if (test_bit(0, &ctx->cq_check_overflow))
2372 __io_cqring_overflow_flush(ctx, false);
2373 if (io_cqring_events(ctx))
2374 goto out;
Jens Axboedef596e2019-01-09 08:59:42 -07002375 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002376 /*
2377 * If a submit got punted to a workqueue, we can have the
2378 * application entering polling for a command before it gets
2379 * issued. That app will hold the uring_lock for the duration
2380 * of the poll right here, so we need to take a breather every
2381 * now and then to ensure that the issue has a chance to add
2382 * the poll to the issued list. Otherwise we can spin here
2383 * forever, while the workqueue is stuck trying to acquire the
2384 * very same mutex.
2385 */
Pavel Begunkove9979b32021-04-13 02:58:45 +01002386 if (list_empty(&ctx->iopoll_list)) {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002387 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002388 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002389 mutex_lock(&ctx->uring_lock);
Pavel Begunkove9979b32021-04-13 02:58:45 +01002390
2391 if (list_empty(&ctx->iopoll_list))
2392 break;
Jens Axboe500f9fb2019-08-19 12:15:59 -06002393 }
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002394 ret = io_do_iopoll(ctx, &nr_events, min);
2395 } while (!ret && nr_events < min && !need_resched());
2396out:
Jens Axboe500f9fb2019-08-19 12:15:59 -06002397 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002398 return ret;
2399}
2400
Jens Axboe491381ce2019-10-17 09:20:46 -06002401static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002402{
Jens Axboe491381ce2019-10-17 09:20:46 -06002403 /*
2404 * Tell lockdep we inherited freeze protection from submission
2405 * thread.
2406 */
2407 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002408 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002409
Pavel Begunkov1c986792021-03-22 01:58:31 +00002410 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2411 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002412 }
2413}
2414
Jens Axboeb63534c2020-06-04 11:28:00 -06002415#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002416static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002417{
Pavel Begunkovab454432021-03-22 01:58:33 +00002418 struct io_async_rw *rw = req->async_data;
2419
2420 if (!rw)
2421 return !io_req_prep_async(req);
2422 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2423 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2424 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002425}
Jens Axboeb63534c2020-06-04 11:28:00 -06002426
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002427static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002428{
Jens Axboe355afae2020-09-02 09:30:31 -06002429 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002430 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002431
Jens Axboe355afae2020-09-02 09:30:31 -06002432 if (!S_ISBLK(mode) && !S_ISREG(mode))
2433 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002434 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2435 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002436 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002437 /*
2438 * If ref is dying, we might be running poll reap from the exit work.
2439 * Don't attempt to reissue from that path, just let it fail with
2440 * -EAGAIN.
2441 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002442 if (percpu_ref_is_dying(&ctx->refs))
2443 return false;
2444 return true;
2445}
Jens Axboee82ad482021-04-02 19:45:34 -06002446#else
Jens Axboea1ff1e32021-04-12 06:40:02 -06002447static bool io_resubmit_prep(struct io_kiocb *req)
2448{
2449 return false;
2450}
Jens Axboee82ad482021-04-02 19:45:34 -06002451static bool io_rw_should_reissue(struct io_kiocb *req)
2452{
2453 return false;
2454}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002455#endif
2456
Jens Axboea1d7c392020-06-22 11:09:46 -06002457static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002458 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002459{
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002460 int cflags = 0;
2461
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002462 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2463 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002464 if (res != req->result) {
2465 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2466 io_rw_should_reissue(req)) {
2467 req->flags |= REQ_F_REISSUE;
2468 return;
2469 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002470 req_set_fail_links(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002471 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002472 if (req->flags & REQ_F_BUFFER_SELECTED)
2473 cflags = io_put_rw_kbuf(req);
2474 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002475}
2476
2477static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2478{
Jens Axboe9adbd452019-12-20 08:45:55 -07002479 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002480
Pavel Begunkov889fca72021-02-10 00:03:09 +00002481 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002482}
2483
Jens Axboedef596e2019-01-09 08:59:42 -07002484static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2485{
Jens Axboe9adbd452019-12-20 08:45:55 -07002486 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002487
Jens Axboe491381ce2019-10-17 09:20:46 -06002488 if (kiocb->ki_flags & IOCB_WRITE)
2489 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002490 if (unlikely(res != req->result)) {
Jens Axboea1ff1e32021-04-12 06:40:02 -06002491 if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
2492 io_resubmit_prep(req))) {
Pavel Begunkov9532b992021-03-22 01:58:34 +00002493 req_set_fail_links(req);
2494 req->flags |= REQ_F_DONT_REISSUE;
2495 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002496 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002497
2498 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002499 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002500 smp_wmb();
2501 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002502}
2503
2504/*
2505 * After the iocb has been issued, it's safe to be found on the poll list.
2506 * Adding the kiocb to the list AFTER submission ensures that we don't
Pavel Begunkovf39c8a52021-04-13 02:58:46 +01002507 * find it from a io_do_iopoll() thread before the issuer is done
Jens Axboedef596e2019-01-09 08:59:42 -07002508 * accessing the kiocb cookie.
2509 */
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002510static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
Jens Axboedef596e2019-01-09 08:59:42 -07002511{
2512 struct io_ring_ctx *ctx = req->ctx;
2513
2514 /*
2515 * Track whether we have multiple files in our lists. This will impact
2516 * how we do polling eventually, not spinning if we're on potentially
2517 * different devices.
2518 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002519 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002520 ctx->poll_multi_file = false;
2521 } else if (!ctx->poll_multi_file) {
2522 struct io_kiocb *list_req;
2523
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002524 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002525 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002526 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002527 ctx->poll_multi_file = true;
2528 }
2529
2530 /*
2531 * For fast devices, IO may have already completed. If it has, add
2532 * it to the front so we find it first.
2533 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002534 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002535 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002536 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002537 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002538
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002539 /*
2540 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
2541 * task context or in io worker task context. If current task context is
2542 * sq thread, we don't need to check whether should wake up sq thread.
2543 */
2544 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
Jens Axboe534ca6d2020-09-02 13:52:19 -06002545 wq_has_sleeper(&ctx->sq_data->wait))
2546 wake_up(&ctx->sq_data->wait);
Jens Axboedef596e2019-01-09 08:59:42 -07002547}
2548
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002549static inline void io_state_file_put(struct io_submit_state *state)
2550{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002551 if (state->file_refs) {
2552 fput_many(state->file, state->file_refs);
2553 state->file_refs = 0;
2554 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002555}
2556
2557/*
2558 * Get as many references to a file as we have IOs left in this submission,
2559 * assuming most submissions are for one file, or at least that each file
2560 * has more than one submission.
2561 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002562static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002563{
2564 if (!state)
2565 return fget(fd);
2566
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002567 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002568 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002569 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002570 return state->file;
2571 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002572 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002573 }
2574 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002575 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002576 return NULL;
2577
2578 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002579 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002580 return state->file;
2581}
2582
Jens Axboe4503b762020-06-01 10:00:27 -06002583static bool io_bdev_nowait(struct block_device *bdev)
2584{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002585 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002586}
2587
Jens Axboe2b188cc2019-01-07 10:46:33 -07002588/*
2589 * If we tracked the file through the SCM inflight mechanism, we could support
2590 * any file. For now, just ensure that anything potentially problematic is done
2591 * inline.
2592 */
Jens Axboe7b29f922021-03-12 08:30:14 -07002593static bool __io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002594{
2595 umode_t mode = file_inode(file)->i_mode;
2596
Jens Axboe4503b762020-06-01 10:00:27 -06002597 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002598 if (IS_ENABLED(CONFIG_BLOCK) &&
2599 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002600 return true;
2601 return false;
2602 }
2603 if (S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002604 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002605 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002606 if (IS_ENABLED(CONFIG_BLOCK) &&
2607 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002608 file->f_op != &io_uring_fops)
2609 return true;
2610 return false;
2611 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002612
Jens Axboec5b85622020-06-09 19:23:05 -06002613 /* any ->read/write should understand O_NONBLOCK */
2614 if (file->f_flags & O_NONBLOCK)
2615 return true;
2616
Jens Axboeaf197f52020-04-28 13:15:06 -06002617 if (!(file->f_mode & FMODE_NOWAIT))
2618 return false;
2619
2620 if (rw == READ)
2621 return file->f_op->read_iter != NULL;
2622
2623 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002624}
2625
Jens Axboe7b29f922021-03-12 08:30:14 -07002626static bool io_file_supports_async(struct io_kiocb *req, int rw)
2627{
2628 if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
2629 return true;
2630 else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
2631 return true;
2632
2633 return __io_file_supports_async(req->file, rw);
2634}
2635
Pavel Begunkova88fc402020-09-30 22:57:53 +03002636static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002637{
Jens Axboedef596e2019-01-09 08:59:42 -07002638 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002639 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002640 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002641 unsigned ioprio;
2642 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002643
Jens Axboe7b29f922021-03-12 08:30:14 -07002644 if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002645 req->flags |= REQ_F_ISREG;
2646
Jens Axboe2b188cc2019-01-07 10:46:33 -07002647 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002648 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002649 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002650 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002651 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002652 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002653 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2654 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2655 if (unlikely(ret))
2656 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002657
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002658 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2659 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2660 req->flags |= REQ_F_NOWAIT;
2661
Jens Axboe2b188cc2019-01-07 10:46:33 -07002662 ioprio = READ_ONCE(sqe->ioprio);
2663 if (ioprio) {
2664 ret = ioprio_check_cap(ioprio);
2665 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002666 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002667
2668 kiocb->ki_ioprio = ioprio;
2669 } else
2670 kiocb->ki_ioprio = get_current_ioprio();
2671
Jens Axboedef596e2019-01-09 08:59:42 -07002672 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002673 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2674 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002675 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002676
Jens Axboedef596e2019-01-09 08:59:42 -07002677 kiocb->ki_flags |= IOCB_HIPRI;
2678 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002679 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002680 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002681 if (kiocb->ki_flags & IOCB_HIPRI)
2682 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002683 kiocb->ki_complete = io_complete_rw;
2684 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002685
Jens Axboe3529d8c2019-12-19 18:24:38 -07002686 req->rw.addr = READ_ONCE(sqe->addr);
2687 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002688 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002689 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002690}
2691
2692static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2693{
2694 switch (ret) {
2695 case -EIOCBQUEUED:
2696 break;
2697 case -ERESTARTSYS:
2698 case -ERESTARTNOINTR:
2699 case -ERESTARTNOHAND:
2700 case -ERESTART_RESTARTBLOCK:
2701 /*
2702 * We can't just restart the syscall, since previously
2703 * submitted sqes may already be in progress. Just fail this
2704 * IO with EINTR.
2705 */
2706 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002707 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002708 default:
2709 kiocb->ki_complete(kiocb, ret, 0);
2710 }
2711}
2712
Jens Axboea1d7c392020-06-22 11:09:46 -06002713static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002714 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002715{
Jens Axboeba042912019-12-25 16:33:42 -07002716 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002717 struct io_async_rw *io = req->async_data;
Pavel Begunkov97284632021-04-08 19:28:03 +01002718 bool check_reissue = kiocb->ki_complete == io_complete_rw;
Jens Axboeba042912019-12-25 16:33:42 -07002719
Jens Axboe227c0c92020-08-13 11:51:40 -06002720 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002721 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002722 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002723 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002724 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002725 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002726 }
2727
Jens Axboeba042912019-12-25 16:33:42 -07002728 if (req->flags & REQ_F_CUR_POS)
2729 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002730 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002731 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002732 else
2733 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002734
2735 if (check_reissue && req->flags & REQ_F_REISSUE) {
2736 req->flags &= ~REQ_F_REISSUE;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002737 if (!io_resubmit_prep(req)) {
2738 req_ref_get(req);
2739 io_queue_async_work(req);
2740 } else {
Pavel Begunkov97284632021-04-08 19:28:03 +01002741 int cflags = 0;
2742
2743 req_set_fail_links(req);
2744 if (req->flags & REQ_F_BUFFER_SELECTED)
2745 cflags = io_put_rw_kbuf(req);
2746 __io_req_complete(req, issue_flags, ret, cflags);
2747 }
2748 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002749}
2750
Pavel Begunkov847595d2021-02-04 13:52:06 +00002751static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07002752{
Jens Axboe9adbd452019-12-20 08:45:55 -07002753 struct io_ring_ctx *ctx = req->ctx;
2754 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002755 struct io_mapped_ubuf *imu;
Pavel Begunkov4be1c612020-09-06 00:45:48 +03002756 u16 index, buf_index = req->buf_index;
Pavel Begunkov75769e32021-04-01 15:43:54 +01002757 u64 buf_end, buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002758 size_t offset;
Jens Axboeedafcce2019-01-09 09:16:05 -07002759
Jens Axboeedafcce2019-01-09 09:16:05 -07002760 if (unlikely(buf_index >= ctx->nr_user_bufs))
2761 return -EFAULT;
Jens Axboeedafcce2019-01-09 09:16:05 -07002762 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2763 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07002764 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002765
Pavel Begunkov75769e32021-04-01 15:43:54 +01002766 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
Jens Axboeedafcce2019-01-09 09:16:05 -07002767 return -EFAULT;
2768 /* not inside the mapped region */
Pavel Begunkov4751f532021-04-01 15:43:55 +01002769 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
Jens Axboeedafcce2019-01-09 09:16:05 -07002770 return -EFAULT;
2771
2772 /*
2773 * May not be a start of buffer, set size appropriately
2774 * and advance us to the beginning.
2775 */
2776 offset = buf_addr - imu->ubuf;
2777 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002778
2779 if (offset) {
2780 /*
2781 * Don't use iov_iter_advance() here, as it's really slow for
2782 * using the latter parts of a big fixed buffer - it iterates
2783 * over each segment manually. We can cheat a bit here, because
2784 * we know that:
2785 *
2786 * 1) it's a BVEC iter, we set it up
2787 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2788 * first and last bvec
2789 *
2790 * So just find our index, and adjust the iterator afterwards.
2791 * If the offset is within the first bvec (or the whole first
2792 * bvec, just use iov_iter_advance(). This makes it easier
2793 * since we can just skip the first segment, which may not
2794 * be PAGE_SIZE aligned.
2795 */
2796 const struct bio_vec *bvec = imu->bvec;
2797
2798 if (offset <= bvec->bv_len) {
2799 iov_iter_advance(iter, offset);
2800 } else {
2801 unsigned long seg_skip;
2802
2803 /* skip first vec */
2804 offset -= bvec->bv_len;
2805 seg_skip = 1 + (offset >> PAGE_SHIFT);
2806
2807 iter->bvec = bvec + seg_skip;
2808 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002809 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002810 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002811 }
2812 }
2813
Pavel Begunkov847595d2021-02-04 13:52:06 +00002814 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002815}
2816
Jens Axboebcda7ba2020-02-23 16:42:51 -07002817static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2818{
2819 if (needs_lock)
2820 mutex_unlock(&ctx->uring_lock);
2821}
2822
2823static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2824{
2825 /*
2826 * "Normal" inline submissions always hold the uring_lock, since we
2827 * grab it from the system call. Same is true for the SQPOLL offload.
2828 * The only exception is when we've detached the request and issue it
2829 * from an async worker thread, grab the lock for that case.
2830 */
2831 if (needs_lock)
2832 mutex_lock(&ctx->uring_lock);
2833}
2834
2835static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2836 int bgid, struct io_buffer *kbuf,
2837 bool needs_lock)
2838{
2839 struct io_buffer *head;
2840
2841 if (req->flags & REQ_F_BUFFER_SELECTED)
2842 return kbuf;
2843
2844 io_ring_submit_lock(req->ctx, needs_lock);
2845
2846 lockdep_assert_held(&req->ctx->uring_lock);
2847
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002848 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002849 if (head) {
2850 if (!list_empty(&head->list)) {
2851 kbuf = list_last_entry(&head->list, struct io_buffer,
2852 list);
2853 list_del(&kbuf->list);
2854 } else {
2855 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002856 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002857 }
2858 if (*len > kbuf->len)
2859 *len = kbuf->len;
2860 } else {
2861 kbuf = ERR_PTR(-ENOBUFS);
2862 }
2863
2864 io_ring_submit_unlock(req->ctx, needs_lock);
2865
2866 return kbuf;
2867}
2868
Jens Axboe4d954c22020-02-27 07:31:19 -07002869static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2870 bool needs_lock)
2871{
2872 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002873 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002874
2875 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002876 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002877 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2878 if (IS_ERR(kbuf))
2879 return kbuf;
2880 req->rw.addr = (u64) (unsigned long) kbuf;
2881 req->flags |= REQ_F_BUFFER_SELECTED;
2882 return u64_to_user_ptr(kbuf->addr);
2883}
2884
2885#ifdef CONFIG_COMPAT
2886static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2887 bool needs_lock)
2888{
2889 struct compat_iovec __user *uiov;
2890 compat_ssize_t clen;
2891 void __user *buf;
2892 ssize_t len;
2893
2894 uiov = u64_to_user_ptr(req->rw.addr);
2895 if (!access_ok(uiov, sizeof(*uiov)))
2896 return -EFAULT;
2897 if (__get_user(clen, &uiov->iov_len))
2898 return -EFAULT;
2899 if (clen < 0)
2900 return -EINVAL;
2901
2902 len = clen;
2903 buf = io_rw_buffer_select(req, &len, needs_lock);
2904 if (IS_ERR(buf))
2905 return PTR_ERR(buf);
2906 iov[0].iov_base = buf;
2907 iov[0].iov_len = (compat_size_t) len;
2908 return 0;
2909}
2910#endif
2911
2912static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2913 bool needs_lock)
2914{
2915 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2916 void __user *buf;
2917 ssize_t len;
2918
2919 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2920 return -EFAULT;
2921
2922 len = iov[0].iov_len;
2923 if (len < 0)
2924 return -EINVAL;
2925 buf = io_rw_buffer_select(req, &len, needs_lock);
2926 if (IS_ERR(buf))
2927 return PTR_ERR(buf);
2928 iov[0].iov_base = buf;
2929 iov[0].iov_len = len;
2930 return 0;
2931}
2932
2933static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2934 bool needs_lock)
2935{
Jens Axboedddb3e22020-06-04 11:27:01 -06002936 if (req->flags & REQ_F_BUFFER_SELECTED) {
2937 struct io_buffer *kbuf;
2938
2939 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2940 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2941 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07002942 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06002943 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00002944 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07002945 return -EINVAL;
2946
2947#ifdef CONFIG_COMPAT
2948 if (req->ctx->compat)
2949 return io_compat_import(req, iov, needs_lock);
2950#endif
2951
2952 return __io_iov_buffer_select(req, iov, needs_lock);
2953}
2954
Pavel Begunkov847595d2021-02-04 13:52:06 +00002955static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
2956 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002957{
Jens Axboe9adbd452019-12-20 08:45:55 -07002958 void __user *buf = u64_to_user_ptr(req->rw.addr);
2959 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00002960 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07002961 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07002962
Pavel Begunkov7d009162019-11-25 23:14:40 +03002963 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002964 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07002965 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07002966 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002967
Jens Axboebcda7ba2020-02-23 16:42:51 -07002968 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002969 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07002970 return -EINVAL;
2971
Jens Axboe3a6820f2019-12-22 15:19:35 -07002972 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002973 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07002974 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03002975 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07002976 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06002977 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002978 }
2979
Jens Axboe3a6820f2019-12-22 15:19:35 -07002980 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2981 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00002982 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07002983 }
2984
Jens Axboe4d954c22020-02-27 07:31:19 -07002985 if (req->flags & REQ_F_BUFFER_SELECT) {
2986 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00002987 if (!ret)
2988 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07002989 *iovec = NULL;
2990 return ret;
2991 }
2992
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02002993 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
2994 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002995}
2996
Jens Axboe0fef9482020-08-26 10:36:20 -06002997static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
2998{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03002999 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003000}
3001
Jens Axboe32960612019-09-23 11:05:34 -06003002/*
3003 * For files that don't have ->read_iter() and ->write_iter(), handle them
3004 * by looping over ->read() or ->write() manually.
3005 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003006static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003007{
Jens Axboe4017eb92020-10-22 14:14:12 -06003008 struct kiocb *kiocb = &req->rw.kiocb;
3009 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003010 ssize_t ret = 0;
3011
3012 /*
3013 * Don't support polled IO through this interface, and we can't
3014 * support non-blocking either. For the latter, this just causes
3015 * the kiocb to be handled from an async context.
3016 */
3017 if (kiocb->ki_flags & IOCB_HIPRI)
3018 return -EOPNOTSUPP;
3019 if (kiocb->ki_flags & IOCB_NOWAIT)
3020 return -EAGAIN;
3021
3022 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003023 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003024 ssize_t nr;
3025
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003026 if (!iov_iter_is_bvec(iter)) {
3027 iovec = iov_iter_iovec(iter);
3028 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003029 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3030 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003031 }
3032
Jens Axboe32960612019-09-23 11:05:34 -06003033 if (rw == READ) {
3034 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003035 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003036 } else {
3037 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003038 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003039 }
3040
3041 if (nr < 0) {
3042 if (!ret)
3043 ret = nr;
3044 break;
3045 }
3046 ret += nr;
3047 if (nr != iovec.iov_len)
3048 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003049 req->rw.len -= nr;
3050 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003051 iov_iter_advance(iter, nr);
3052 }
3053
3054 return ret;
3055}
3056
Jens Axboeff6165b2020-08-13 09:47:43 -06003057static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3058 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003059{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003060 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003061
Jens Axboeff6165b2020-08-13 09:47:43 -06003062 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003063 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003064 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003065 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003066 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003067 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003068 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003069 unsigned iov_off = 0;
3070
3071 rw->iter.iov = rw->fast_iov;
3072 if (iter->iov != fast_iov) {
3073 iov_off = iter->iov - fast_iov;
3074 rw->iter.iov += iov_off;
3075 }
3076 if (rw->fast_iov != fast_iov)
3077 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003078 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003079 } else {
3080 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003081 }
3082}
3083
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003084static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003085{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003086 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3087 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3088 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003089}
3090
Jens Axboeff6165b2020-08-13 09:47:43 -06003091static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3092 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003093 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003094{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003095 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003096 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003097 if (!req->async_data) {
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003098 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003099 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003100 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003101 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003102
Jens Axboeff6165b2020-08-13 09:47:43 -06003103 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003104 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003105 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003106}
3107
Pavel Begunkov73debe62020-09-30 22:57:54 +03003108static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003109{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003110 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003111 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003112 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003113
Pavel Begunkov2846c482020-11-07 13:16:27 +00003114 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003115 if (unlikely(ret < 0))
3116 return ret;
3117
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003118 iorw->bytes_done = 0;
3119 iorw->free_iovec = iov;
3120 if (iov)
3121 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003122 return 0;
3123}
3124
Pavel Begunkov73debe62020-09-30 22:57:54 +03003125static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003126{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003127 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3128 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003129 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003130}
3131
Jens Axboec1dd91d2020-08-03 16:43:59 -06003132/*
3133 * This is our waitqueue callback handler, registered through lock_page_async()
3134 * when we initially tried to do the IO with the iocb armed our waitqueue.
3135 * This gets called when the page is unlocked, and we generally expect that to
3136 * happen when the page IO is completed and the page is now uptodate. This will
3137 * queue a task_work based retry of the operation, attempting to copy the data
3138 * again. If the latter fails because the page was NOT uptodate, then we will
3139 * do a thread based blocking retry of the operation. That's the unexpected
3140 * slow path.
3141 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003142static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3143 int sync, void *arg)
3144{
3145 struct wait_page_queue *wpq;
3146 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003147 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003148
3149 wpq = container_of(wait, struct wait_page_queue, wait);
3150
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003151 if (!wake_page_match(wpq, key))
3152 return 0;
3153
Hao Xuc8d317a2020-09-29 20:00:45 +08003154 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003155 list_del_init(&wait->entry);
3156
Jens Axboebcf5a062020-05-22 09:24:42 -06003157 /* submit ref gets dropped, acquire a new one */
Jens Axboede9b4cc2021-02-24 13:28:27 -07003158 req_ref_get(req);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003159 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003160 return 1;
3161}
3162
Jens Axboec1dd91d2020-08-03 16:43:59 -06003163/*
3164 * This controls whether a given IO request should be armed for async page
3165 * based retry. If we return false here, the request is handed to the async
3166 * worker threads for retry. If we're doing buffered reads on a regular file,
3167 * we prepare a private wait_page_queue entry and retry the operation. This
3168 * will either succeed because the page is now uptodate and unlocked, or it
3169 * will register a callback when the page is unlocked at IO completion. Through
3170 * that callback, io_uring uses task_work to setup a retry of the operation.
3171 * That retry will attempt the buffered read again. The retry will generally
3172 * succeed, or in rare cases where it fails, we then fall back to using the
3173 * async worker threads for a blocking retry.
3174 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003175static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003176{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003177 struct io_async_rw *rw = req->async_data;
3178 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003179 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003180
3181 /* never retry for NOWAIT, we just complete with -EAGAIN */
3182 if (req->flags & REQ_F_NOWAIT)
3183 return false;
3184
Jens Axboe227c0c92020-08-13 11:51:40 -06003185 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003186 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003187 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003188
Jens Axboebcf5a062020-05-22 09:24:42 -06003189 /*
3190 * just use poll if we can, and don't attempt if the fs doesn't
3191 * support callback based unlocks
3192 */
3193 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3194 return false;
3195
Jens Axboe3b2a4432020-08-16 10:58:43 -07003196 wait->wait.func = io_async_buf_func;
3197 wait->wait.private = req;
3198 wait->wait.flags = 0;
3199 INIT_LIST_HEAD(&wait->wait.entry);
3200 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003201 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003202 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003203 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003204}
3205
3206static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3207{
3208 if (req->file->f_op->read_iter)
3209 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003210 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003211 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003212 else
3213 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003214}
3215
Pavel Begunkov889fca72021-02-10 00:03:09 +00003216static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003217{
3218 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003219 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003220 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003221 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003222 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003223 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003224
Pavel Begunkov2846c482020-11-07 13:16:27 +00003225 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003226 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003227 iovec = NULL;
3228 } else {
3229 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3230 if (ret < 0)
3231 return ret;
3232 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003233 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003234 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003235
Jens Axboefd6c2e42019-12-18 12:19:41 -07003236 /* Ensure we clear previously set non-block flag */
3237 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003238 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003239 else
3240 kiocb->ki_flags |= IOCB_NOWAIT;
3241
Pavel Begunkov24c74672020-06-21 13:09:51 +03003242 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003243 if (force_nonblock && !io_file_supports_async(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003244 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003245 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003246 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003247
Pavel Begunkov632546c2020-11-07 13:16:26 +00003248 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003249 if (unlikely(ret)) {
3250 kfree(iovec);
3251 return ret;
3252 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003253
Jens Axboe227c0c92020-08-13 11:51:40 -06003254 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003255
Jens Axboe230d50d2021-04-01 20:41:15 -06003256 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003257 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003258 /* IOPOLL retry should happen for io-wq threads */
3259 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003260 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003261 /* no retry on NONBLOCK nor RWF_NOWAIT */
3262 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003263 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003264 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003265 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003266 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003267 } else if (ret == -EIOCBQUEUED) {
3268 goto out_free;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003269 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003270 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003271 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003272 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003273 }
3274
Jens Axboe227c0c92020-08-13 11:51:40 -06003275 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003276 if (ret2)
3277 return ret2;
3278
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003279 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003280 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003281 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003282 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003283
Pavel Begunkovb23df912021-02-04 13:52:04 +00003284 do {
3285 io_size -= ret;
3286 rw->bytes_done += ret;
3287 /* if we can retry, do so with the callbacks armed */
3288 if (!io_rw_should_retry(req)) {
3289 kiocb->ki_flags &= ~IOCB_WAITQ;
3290 return -EAGAIN;
3291 }
3292
3293 /*
3294 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3295 * we get -EIOCBQUEUED, then we'll get a notification when the
3296 * desired page gets unlocked. We can also get a partial read
3297 * here, and if we do, then just retry at the new offset.
3298 */
3299 ret = io_iter_do_read(req, iter);
3300 if (ret == -EIOCBQUEUED)
3301 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003302 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003303 kiocb->ki_flags &= ~IOCB_WAITQ;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003304 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003305done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003306 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003307out_free:
3308 /* it's faster to check here then delegate to kfree */
3309 if (iovec)
3310 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003311 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003312}
3313
Pavel Begunkov73debe62020-09-30 22:57:54 +03003314static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003315{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003316 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3317 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003318 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003319}
3320
Pavel Begunkov889fca72021-02-10 00:03:09 +00003321static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003322{
3323 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003324 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003325 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003326 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003327 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003328 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003329
Pavel Begunkov2846c482020-11-07 13:16:27 +00003330 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003331 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003332 iovec = NULL;
3333 } else {
3334 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3335 if (ret < 0)
3336 return ret;
3337 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003338 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003339 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003340
Jens Axboefd6c2e42019-12-18 12:19:41 -07003341 /* Ensure we clear previously set non-block flag */
3342 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003343 kiocb->ki_flags &= ~IOCB_NOWAIT;
3344 else
3345 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003346
Pavel Begunkov24c74672020-06-21 13:09:51 +03003347 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003348 if (force_nonblock && !io_file_supports_async(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003349 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003350
Jens Axboe10d59342019-12-09 20:16:22 -07003351 /* file path doesn't support NOWAIT for non-direct_IO */
3352 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3353 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003354 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003355
Pavel Begunkov632546c2020-11-07 13:16:26 +00003356 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003357 if (unlikely(ret))
3358 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003359
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003360 /*
3361 * Open-code file_start_write here to grab freeze protection,
3362 * which will be released by another thread in
3363 * io_complete_rw(). Fool lockdep by telling it the lock got
3364 * released so that it doesn't complain about the held lock when
3365 * we return to userspace.
3366 */
3367 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003368 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003369 __sb_writers_release(file_inode(req->file)->i_sb,
3370 SB_FREEZE_WRITE);
3371 }
3372 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003373
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003374 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003375 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003376 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003377 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003378 else
3379 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003380
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003381 if (req->flags & REQ_F_REISSUE) {
3382 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003383 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003384 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003385
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003386 /*
3387 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3388 * retry them without IOCB_NOWAIT.
3389 */
3390 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3391 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003392 /* no retry on NONBLOCK nor RWF_NOWAIT */
3393 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003394 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003395 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003396 /* IOPOLL retry should happen for io-wq threads */
3397 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3398 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003399done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003400 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003401 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003402copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003403 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003404 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003405 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003406 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003407 }
Jens Axboe31b51512019-01-18 22:56:34 -07003408out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003409 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003410 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003411 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003412 return ret;
3413}
3414
Jens Axboe80a261f2020-09-28 14:23:58 -06003415static int io_renameat_prep(struct io_kiocb *req,
3416 const struct io_uring_sqe *sqe)
3417{
3418 struct io_rename *ren = &req->rename;
3419 const char __user *oldf, *newf;
3420
3421 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3422 return -EBADF;
3423
3424 ren->old_dfd = READ_ONCE(sqe->fd);
3425 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3426 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3427 ren->new_dfd = READ_ONCE(sqe->len);
3428 ren->flags = READ_ONCE(sqe->rename_flags);
3429
3430 ren->oldpath = getname(oldf);
3431 if (IS_ERR(ren->oldpath))
3432 return PTR_ERR(ren->oldpath);
3433
3434 ren->newpath = getname(newf);
3435 if (IS_ERR(ren->newpath)) {
3436 putname(ren->oldpath);
3437 return PTR_ERR(ren->newpath);
3438 }
3439
3440 req->flags |= REQ_F_NEED_CLEANUP;
3441 return 0;
3442}
3443
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003444static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003445{
3446 struct io_rename *ren = &req->rename;
3447 int ret;
3448
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003449 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003450 return -EAGAIN;
3451
3452 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3453 ren->newpath, ren->flags);
3454
3455 req->flags &= ~REQ_F_NEED_CLEANUP;
3456 if (ret < 0)
3457 req_set_fail_links(req);
3458 io_req_complete(req, ret);
3459 return 0;
3460}
3461
Jens Axboe14a11432020-09-28 14:27:37 -06003462static int io_unlinkat_prep(struct io_kiocb *req,
3463 const struct io_uring_sqe *sqe)
3464{
3465 struct io_unlink *un = &req->unlink;
3466 const char __user *fname;
3467
3468 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3469 return -EBADF;
3470
3471 un->dfd = READ_ONCE(sqe->fd);
3472
3473 un->flags = READ_ONCE(sqe->unlink_flags);
3474 if (un->flags & ~AT_REMOVEDIR)
3475 return -EINVAL;
3476
3477 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3478 un->filename = getname(fname);
3479 if (IS_ERR(un->filename))
3480 return PTR_ERR(un->filename);
3481
3482 req->flags |= REQ_F_NEED_CLEANUP;
3483 return 0;
3484}
3485
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003486static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003487{
3488 struct io_unlink *un = &req->unlink;
3489 int ret;
3490
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003491 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003492 return -EAGAIN;
3493
3494 if (un->flags & AT_REMOVEDIR)
3495 ret = do_rmdir(un->dfd, un->filename);
3496 else
3497 ret = do_unlinkat(un->dfd, un->filename);
3498
3499 req->flags &= ~REQ_F_NEED_CLEANUP;
3500 if (ret < 0)
3501 req_set_fail_links(req);
3502 io_req_complete(req, ret);
3503 return 0;
3504}
3505
Jens Axboe36f4fa62020-09-05 11:14:22 -06003506static int io_shutdown_prep(struct io_kiocb *req,
3507 const struct io_uring_sqe *sqe)
3508{
3509#if defined(CONFIG_NET)
3510 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3511 return -EINVAL;
3512 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3513 sqe->buf_index)
3514 return -EINVAL;
3515
3516 req->shutdown.how = READ_ONCE(sqe->len);
3517 return 0;
3518#else
3519 return -EOPNOTSUPP;
3520#endif
3521}
3522
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003523static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003524{
3525#if defined(CONFIG_NET)
3526 struct socket *sock;
3527 int ret;
3528
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003529 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003530 return -EAGAIN;
3531
Linus Torvalds48aba792020-12-16 12:44:05 -08003532 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003533 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003534 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003535
3536 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003537 if (ret < 0)
3538 req_set_fail_links(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003539 io_req_complete(req, ret);
3540 return 0;
3541#else
3542 return -EOPNOTSUPP;
3543#endif
3544}
3545
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003546static int __io_splice_prep(struct io_kiocb *req,
3547 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003548{
3549 struct io_splice* sp = &req->splice;
3550 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003551
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003552 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3553 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003554
3555 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003556 sp->len = READ_ONCE(sqe->len);
3557 sp->flags = READ_ONCE(sqe->splice_flags);
3558
3559 if (unlikely(sp->flags & ~valid_flags))
3560 return -EINVAL;
3561
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003562 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3563 (sp->flags & SPLICE_F_FD_IN_FIXED));
3564 if (!sp->file_in)
3565 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003566 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003567 return 0;
3568}
3569
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003570static int io_tee_prep(struct io_kiocb *req,
3571 const struct io_uring_sqe *sqe)
3572{
3573 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3574 return -EINVAL;
3575 return __io_splice_prep(req, sqe);
3576}
3577
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003578static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003579{
3580 struct io_splice *sp = &req->splice;
3581 struct file *in = sp->file_in;
3582 struct file *out = sp->file_out;
3583 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3584 long ret = 0;
3585
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003586 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003587 return -EAGAIN;
3588 if (sp->len)
3589 ret = do_tee(in, out, sp->len, flags);
3590
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003591 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3592 io_put_file(in);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003593 req->flags &= ~REQ_F_NEED_CLEANUP;
3594
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003595 if (ret != sp->len)
3596 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003597 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003598 return 0;
3599}
3600
3601static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3602{
3603 struct io_splice* sp = &req->splice;
3604
3605 sp->off_in = READ_ONCE(sqe->splice_off_in);
3606 sp->off_out = READ_ONCE(sqe->off);
3607 return __io_splice_prep(req, sqe);
3608}
3609
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003610static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003611{
3612 struct io_splice *sp = &req->splice;
3613 struct file *in = sp->file_in;
3614 struct file *out = sp->file_out;
3615 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3616 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003617 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003618
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003619 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003620 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003621
3622 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3623 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003624
Jens Axboe948a7742020-05-17 14:21:38 -06003625 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003626 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003627
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003628 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3629 io_put_file(in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003630 req->flags &= ~REQ_F_NEED_CLEANUP;
3631
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003632 if (ret != sp->len)
3633 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003634 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003635 return 0;
3636}
3637
Jens Axboe2b188cc2019-01-07 10:46:33 -07003638/*
3639 * IORING_OP_NOP just posts a completion event, nothing else.
3640 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003641static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003642{
3643 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003644
Jens Axboedef596e2019-01-09 08:59:42 -07003645 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3646 return -EINVAL;
3647
Pavel Begunkov889fca72021-02-10 00:03:09 +00003648 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003649 return 0;
3650}
3651
Pavel Begunkov1155c762021-02-18 18:29:38 +00003652static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003653{
Jens Axboe6b063142019-01-10 22:13:58 -07003654 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003655
Jens Axboe09bb8392019-03-13 12:39:28 -06003656 if (!req->file)
3657 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003658
Jens Axboe6b063142019-01-10 22:13:58 -07003659 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003660 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003661 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003662 return -EINVAL;
3663
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003664 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3665 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3666 return -EINVAL;
3667
3668 req->sync.off = READ_ONCE(sqe->off);
3669 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003670 return 0;
3671}
3672
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003673static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003674{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003675 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003676 int ret;
3677
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003678 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003679 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003680 return -EAGAIN;
3681
Jens Axboe9adbd452019-12-20 08:45:55 -07003682 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003683 end > 0 ? end : LLONG_MAX,
3684 req->sync.flags & IORING_FSYNC_DATASYNC);
3685 if (ret < 0)
3686 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003687 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003688 return 0;
3689}
3690
Jens Axboed63d1b52019-12-10 10:38:56 -07003691static int io_fallocate_prep(struct io_kiocb *req,
3692 const struct io_uring_sqe *sqe)
3693{
3694 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3695 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003696 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3697 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003698
3699 req->sync.off = READ_ONCE(sqe->off);
3700 req->sync.len = READ_ONCE(sqe->addr);
3701 req->sync.mode = READ_ONCE(sqe->len);
3702 return 0;
3703}
3704
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003705static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003706{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003707 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003708
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003709 /* fallocate always requiring blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003710 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003711 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003712 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3713 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003714 if (ret < 0)
3715 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003716 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003717 return 0;
3718}
3719
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003720static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003721{
Jens Axboef8748882020-01-08 17:47:02 -07003722 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003723 int ret;
3724
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003725 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003726 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003727 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003728 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003729
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003730 /* open.how should be already initialised */
3731 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003732 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003733
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003734 req->open.dfd = READ_ONCE(sqe->fd);
3735 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003736 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003737 if (IS_ERR(req->open.filename)) {
3738 ret = PTR_ERR(req->open.filename);
3739 req->open.filename = NULL;
3740 return ret;
3741 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003742 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003743 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003744 return 0;
3745}
3746
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003747static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3748{
3749 u64 flags, mode;
3750
Jens Axboe14587a462020-09-05 11:36:08 -06003751 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003752 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003753 mode = READ_ONCE(sqe->len);
3754 flags = READ_ONCE(sqe->open_flags);
3755 req->open.how = build_open_how(flags, mode);
3756 return __io_openat_prep(req, sqe);
3757}
3758
Jens Axboecebdb982020-01-08 17:59:24 -07003759static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3760{
3761 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003762 size_t len;
3763 int ret;
3764
Jens Axboe14587a462020-09-05 11:36:08 -06003765 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003766 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003767 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3768 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003769 if (len < OPEN_HOW_SIZE_VER0)
3770 return -EINVAL;
3771
3772 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3773 len);
3774 if (ret)
3775 return ret;
3776
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003777 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003778}
3779
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003780static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003781{
3782 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003783 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003784 bool nonblock_set;
3785 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003786 int ret;
3787
Jens Axboecebdb982020-01-08 17:59:24 -07003788 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003789 if (ret)
3790 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003791 nonblock_set = op.open_flag & O_NONBLOCK;
3792 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003793 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003794 /*
3795 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3796 * it'll always -EAGAIN
3797 */
3798 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3799 return -EAGAIN;
3800 op.lookup_flags |= LOOKUP_CACHED;
3801 op.open_flag |= O_NONBLOCK;
3802 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003803
Jens Axboe4022e7a2020-03-19 19:23:18 -06003804 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003805 if (ret < 0)
3806 goto err;
3807
3808 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Jens Axboe3a81fd02020-12-10 12:25:36 -07003809 /* only retry if RESOLVE_CACHED wasn't already set by application */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003810 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
3811 file == ERR_PTR(-EAGAIN)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003812 /*
3813 * We could hang on to this 'fd', but seems like marginal
3814 * gain for something that is now known to be a slower path.
3815 * So just put it, and we'll get a new one when we retry.
3816 */
3817 put_unused_fd(ret);
3818 return -EAGAIN;
3819 }
3820
Jens Axboe15b71ab2019-12-11 11:20:36 -07003821 if (IS_ERR(file)) {
3822 put_unused_fd(ret);
3823 ret = PTR_ERR(file);
3824 } else {
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003825 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
Jens Axboe3a81fd02020-12-10 12:25:36 -07003826 file->f_flags &= ~O_NONBLOCK;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003827 fsnotify_open(file);
3828 fd_install(ret, file);
3829 }
3830err:
3831 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003832 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003833 if (ret < 0)
3834 req_set_fail_links(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01003835 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003836 return 0;
3837}
3838
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003839static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003840{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003841 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003842}
3843
Jens Axboe067524e2020-03-02 16:32:28 -07003844static int io_remove_buffers_prep(struct io_kiocb *req,
3845 const struct io_uring_sqe *sqe)
3846{
3847 struct io_provide_buf *p = &req->pbuf;
3848 u64 tmp;
3849
3850 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3851 return -EINVAL;
3852
3853 tmp = READ_ONCE(sqe->fd);
3854 if (!tmp || tmp > USHRT_MAX)
3855 return -EINVAL;
3856
3857 memset(p, 0, sizeof(*p));
3858 p->nbufs = tmp;
3859 p->bgid = READ_ONCE(sqe->buf_group);
3860 return 0;
3861}
3862
3863static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3864 int bgid, unsigned nbufs)
3865{
3866 unsigned i = 0;
3867
3868 /* shouldn't happen */
3869 if (!nbufs)
3870 return 0;
3871
3872 /* the head kbuf is the list itself */
3873 while (!list_empty(&buf->list)) {
3874 struct io_buffer *nxt;
3875
3876 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3877 list_del(&nxt->list);
3878 kfree(nxt);
3879 if (++i == nbufs)
3880 return i;
3881 }
3882 i++;
3883 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003884 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003885
3886 return i;
3887}
3888
Pavel Begunkov889fca72021-02-10 00:03:09 +00003889static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003890{
3891 struct io_provide_buf *p = &req->pbuf;
3892 struct io_ring_ctx *ctx = req->ctx;
3893 struct io_buffer *head;
3894 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003895 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003896
3897 io_ring_submit_lock(ctx, !force_nonblock);
3898
3899 lockdep_assert_held(&ctx->uring_lock);
3900
3901 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003902 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003903 if (head)
3904 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003905 if (ret < 0)
3906 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003907
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00003908 /* complete before unlock, IOPOLL may need the lock */
3909 __io_req_complete(req, issue_flags, ret, 0);
3910 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07003911 return 0;
3912}
3913
Jens Axboeddf0322d2020-02-23 16:41:33 -07003914static int io_provide_buffers_prep(struct io_kiocb *req,
3915 const struct io_uring_sqe *sqe)
3916{
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003917 unsigned long size;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003918 struct io_provide_buf *p = &req->pbuf;
3919 u64 tmp;
3920
3921 if (sqe->ioprio || sqe->rw_flags)
3922 return -EINVAL;
3923
3924 tmp = READ_ONCE(sqe->fd);
3925 if (!tmp || tmp > USHRT_MAX)
3926 return -E2BIG;
3927 p->nbufs = tmp;
3928 p->addr = READ_ONCE(sqe->addr);
3929 p->len = READ_ONCE(sqe->len);
3930
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003931 size = (unsigned long)p->len * p->nbufs;
3932 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003933 return -EFAULT;
3934
3935 p->bgid = READ_ONCE(sqe->buf_group);
3936 tmp = READ_ONCE(sqe->off);
3937 if (tmp > USHRT_MAX)
3938 return -E2BIG;
3939 p->bid = tmp;
3940 return 0;
3941}
3942
3943static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3944{
3945 struct io_buffer *buf;
3946 u64 addr = pbuf->addr;
3947 int i, bid = pbuf->bid;
3948
3949 for (i = 0; i < pbuf->nbufs; i++) {
3950 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3951 if (!buf)
3952 break;
3953
3954 buf->addr = addr;
3955 buf->len = pbuf->len;
3956 buf->bid = bid;
3957 addr += pbuf->len;
3958 bid++;
3959 if (!*head) {
3960 INIT_LIST_HEAD(&buf->list);
3961 *head = buf;
3962 } else {
3963 list_add_tail(&buf->list, &(*head)->list);
3964 }
3965 }
3966
3967 return i ? i : -ENOMEM;
3968}
3969
Pavel Begunkov889fca72021-02-10 00:03:09 +00003970static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07003971{
3972 struct io_provide_buf *p = &req->pbuf;
3973 struct io_ring_ctx *ctx = req->ctx;
3974 struct io_buffer *head, *list;
3975 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003976 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003977
3978 io_ring_submit_lock(ctx, !force_nonblock);
3979
3980 lockdep_assert_held(&ctx->uring_lock);
3981
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003982 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07003983
3984 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003985 if (ret >= 0 && !list) {
3986 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
3987 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07003988 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07003989 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07003990 if (ret < 0)
3991 req_set_fail_links(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00003992 /* complete before unlock, IOPOLL may need the lock */
3993 __io_req_complete(req, issue_flags, ret, 0);
3994 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07003995 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003996}
3997
Jens Axboe3e4827b2020-01-08 15:18:09 -07003998static int io_epoll_ctl_prep(struct io_kiocb *req,
3999 const struct io_uring_sqe *sqe)
4000{
4001#if defined(CONFIG_EPOLL)
4002 if (sqe->ioprio || sqe->buf_index)
4003 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004004 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004005 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004006
4007 req->epoll.epfd = READ_ONCE(sqe->fd);
4008 req->epoll.op = READ_ONCE(sqe->len);
4009 req->epoll.fd = READ_ONCE(sqe->off);
4010
4011 if (ep_op_has_event(req->epoll.op)) {
4012 struct epoll_event __user *ev;
4013
4014 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4015 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4016 return -EFAULT;
4017 }
4018
4019 return 0;
4020#else
4021 return -EOPNOTSUPP;
4022#endif
4023}
4024
Pavel Begunkov889fca72021-02-10 00:03:09 +00004025static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004026{
4027#if defined(CONFIG_EPOLL)
4028 struct io_epoll *ie = &req->epoll;
4029 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004030 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004031
4032 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4033 if (force_nonblock && ret == -EAGAIN)
4034 return -EAGAIN;
4035
4036 if (ret < 0)
4037 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004038 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004039 return 0;
4040#else
4041 return -EOPNOTSUPP;
4042#endif
4043}
4044
Jens Axboec1ca7572019-12-25 22:18:28 -07004045static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4046{
4047#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4048 if (sqe->ioprio || sqe->buf_index || sqe->off)
4049 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004050 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4051 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004052
4053 req->madvise.addr = READ_ONCE(sqe->addr);
4054 req->madvise.len = READ_ONCE(sqe->len);
4055 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4056 return 0;
4057#else
4058 return -EOPNOTSUPP;
4059#endif
4060}
4061
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004062static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004063{
4064#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4065 struct io_madvise *ma = &req->madvise;
4066 int ret;
4067
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004068 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004069 return -EAGAIN;
4070
Minchan Kim0726b012020-10-17 16:14:50 -07004071 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004072 if (ret < 0)
4073 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004074 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004075 return 0;
4076#else
4077 return -EOPNOTSUPP;
4078#endif
4079}
4080
Jens Axboe4840e412019-12-25 22:03:45 -07004081static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4082{
4083 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4084 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004085 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4086 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004087
4088 req->fadvise.offset = READ_ONCE(sqe->off);
4089 req->fadvise.len = READ_ONCE(sqe->len);
4090 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4091 return 0;
4092}
4093
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004094static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004095{
4096 struct io_fadvise *fa = &req->fadvise;
4097 int ret;
4098
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004099 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004100 switch (fa->advice) {
4101 case POSIX_FADV_NORMAL:
4102 case POSIX_FADV_RANDOM:
4103 case POSIX_FADV_SEQUENTIAL:
4104 break;
4105 default:
4106 return -EAGAIN;
4107 }
4108 }
Jens Axboe4840e412019-12-25 22:03:45 -07004109
4110 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4111 if (ret < 0)
4112 req_set_fail_links(req);
Pavel Begunkov0bdf3392021-04-11 01:46:29 +01004113 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe4840e412019-12-25 22:03:45 -07004114 return 0;
4115}
4116
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004117static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4118{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004119 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004120 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004121 if (sqe->ioprio || sqe->buf_index)
4122 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004123 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004124 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004125
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004126 req->statx.dfd = READ_ONCE(sqe->fd);
4127 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004128 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004129 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4130 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004131
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004132 return 0;
4133}
4134
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004135static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004136{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004137 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004138 int ret;
4139
Pavel Begunkov59d70012021-03-22 01:58:30 +00004140 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004141 return -EAGAIN;
4142
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004143 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4144 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004145
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004146 if (ret < 0)
4147 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004148 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004149 return 0;
4150}
4151
Jens Axboeb5dba592019-12-11 14:02:38 -07004152static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4153{
Jens Axboe14587a462020-09-05 11:36:08 -06004154 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004155 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004156 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4157 sqe->rw_flags || sqe->buf_index)
4158 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004159 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004160 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004161
4162 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004163 return 0;
4164}
4165
Pavel Begunkov889fca72021-02-10 00:03:09 +00004166static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004167{
Jens Axboe9eac1902021-01-19 15:50:37 -07004168 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004169 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004170 struct fdtable *fdt;
Pavel Begunkova1fde922021-04-11 01:46:28 +01004171 struct file *file = NULL;
4172 int ret = -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004173
Jens Axboe9eac1902021-01-19 15:50:37 -07004174 spin_lock(&files->file_lock);
4175 fdt = files_fdtable(files);
4176 if (close->fd >= fdt->max_fds) {
4177 spin_unlock(&files->file_lock);
4178 goto err;
4179 }
4180 file = fdt->fd[close->fd];
Pavel Begunkova1fde922021-04-11 01:46:28 +01004181 if (!file || file->f_op == &io_uring_fops) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004182 spin_unlock(&files->file_lock);
4183 file = NULL;
4184 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004185 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004186
4187 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004188 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004189 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004190 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004191 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004192
Jens Axboe9eac1902021-01-19 15:50:37 -07004193 ret = __close_fd_get_file(close->fd, &file);
4194 spin_unlock(&files->file_lock);
4195 if (ret < 0) {
4196 if (ret == -ENOENT)
4197 ret = -EBADF;
4198 goto err;
4199 }
4200
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004201 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004202 ret = filp_close(file, current->files);
4203err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004204 if (ret < 0)
4205 req_set_fail_links(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004206 if (file)
4207 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004208 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004209 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004210}
4211
Pavel Begunkov1155c762021-02-18 18:29:38 +00004212static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004213{
4214 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004215
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004216 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4217 return -EINVAL;
4218 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4219 return -EINVAL;
4220
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004221 req->sync.off = READ_ONCE(sqe->off);
4222 req->sync.len = READ_ONCE(sqe->len);
4223 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004224 return 0;
4225}
4226
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004227static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004228{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004229 int ret;
4230
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004231 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004232 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004233 return -EAGAIN;
4234
Jens Axboe9adbd452019-12-20 08:45:55 -07004235 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004236 req->sync.flags);
4237 if (ret < 0)
4238 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004239 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004240 return 0;
4241}
4242
YueHaibing469956e2020-03-04 15:53:52 +08004243#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004244static int io_setup_async_msg(struct io_kiocb *req,
4245 struct io_async_msghdr *kmsg)
4246{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004247 struct io_async_msghdr *async_msg = req->async_data;
4248
4249 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004250 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004251 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004252 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004253 return -ENOMEM;
4254 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004255 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004256 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004257 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004258 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004259 /* if were using fast_iov, set it to the new one */
4260 if (!async_msg->free_iov)
4261 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4262
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004263 return -EAGAIN;
4264}
4265
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004266static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4267 struct io_async_msghdr *iomsg)
4268{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004269 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004270 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004271 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004272 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004273}
4274
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004275static int io_sendmsg_prep_async(struct io_kiocb *req)
4276{
4277 int ret;
4278
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004279 ret = io_sendmsg_copy_hdr(req, req->async_data);
4280 if (!ret)
4281 req->flags |= REQ_F_NEED_CLEANUP;
4282 return ret;
4283}
4284
Jens Axboe3529d8c2019-12-19 18:24:38 -07004285static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004286{
Jens Axboee47293f2019-12-20 08:58:21 -07004287 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004288
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004289 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4290 return -EINVAL;
4291
Pavel Begunkov270a5942020-07-12 20:41:04 +03004292 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004293 sr->len = READ_ONCE(sqe->len);
Pavel Begunkov04411802021-04-01 15:44:00 +01004294 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4295 if (sr->msg_flags & MSG_DONTWAIT)
4296 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004297
Jens Axboed8768362020-02-27 14:17:49 -07004298#ifdef CONFIG_COMPAT
4299 if (req->ctx->compat)
4300 sr->msg_flags |= MSG_CMSG_COMPAT;
4301#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004302 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004303}
4304
Pavel Begunkov889fca72021-02-10 00:03:09 +00004305static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004306{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004307 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004308 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004309 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004310 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004311 int ret;
4312
Florent Revestdba4a922020-12-04 12:36:04 +01004313 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004314 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004315 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004316
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004317 kmsg = req->async_data;
4318 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004319 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004320 if (ret)
4321 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004322 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004323 }
4324
Pavel Begunkov04411802021-04-01 15:44:00 +01004325 flags = req->sr_msg.msg_flags;
4326 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004327 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004328 if (flags & MSG_WAITALL)
4329 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4330
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004331 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004332 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004333 return io_setup_async_msg(req, kmsg);
4334 if (ret == -ERESTARTSYS)
4335 ret = -EINTR;
4336
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004337 /* fast path, check for non-NULL to avoid function call */
4338 if (kmsg->free_iov)
4339 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004340 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004341 if (ret < min_ret)
Jens Axboefddafac2020-01-04 20:19:44 -07004342 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004343 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004344 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004345}
4346
Pavel Begunkov889fca72021-02-10 00:03:09 +00004347static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004348{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004349 struct io_sr_msg *sr = &req->sr_msg;
4350 struct msghdr msg;
4351 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004352 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004353 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004354 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004355 int ret;
4356
Florent Revestdba4a922020-12-04 12:36:04 +01004357 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004358 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004359 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004360
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004361 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4362 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004363 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004364
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004365 msg.msg_name = NULL;
4366 msg.msg_control = NULL;
4367 msg.msg_controllen = 0;
4368 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004369
Pavel Begunkov04411802021-04-01 15:44:00 +01004370 flags = req->sr_msg.msg_flags;
4371 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004372 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004373 if (flags & MSG_WAITALL)
4374 min_ret = iov_iter_count(&msg.msg_iter);
4375
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004376 msg.msg_flags = flags;
4377 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004378 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004379 return -EAGAIN;
4380 if (ret == -ERESTARTSYS)
4381 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004382
Stefan Metzmacher00312752021-03-20 20:33:36 +01004383 if (ret < min_ret)
Jens Axboe03b12302019-12-02 18:50:25 -07004384 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004385 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004386 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004387}
4388
Pavel Begunkov1400e692020-07-12 20:41:05 +03004389static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4390 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004391{
4392 struct io_sr_msg *sr = &req->sr_msg;
4393 struct iovec __user *uiov;
4394 size_t iov_len;
4395 int ret;
4396
Pavel Begunkov1400e692020-07-12 20:41:05 +03004397 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4398 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004399 if (ret)
4400 return ret;
4401
4402 if (req->flags & REQ_F_BUFFER_SELECT) {
4403 if (iov_len > 1)
4404 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004405 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004406 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004407 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004408 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004409 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004410 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004411 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004412 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004413 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004414 if (ret > 0)
4415 ret = 0;
4416 }
4417
4418 return ret;
4419}
4420
4421#ifdef CONFIG_COMPAT
4422static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004423 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004424{
Jens Axboe52de1fe2020-02-27 10:15:42 -07004425 struct io_sr_msg *sr = &req->sr_msg;
4426 struct compat_iovec __user *uiov;
4427 compat_uptr_t ptr;
4428 compat_size_t len;
4429 int ret;
4430
Pavel Begunkov4af34172021-04-11 01:46:30 +01004431 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4432 &ptr, &len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004433 if (ret)
4434 return ret;
4435
4436 uiov = compat_ptr(ptr);
4437 if (req->flags & REQ_F_BUFFER_SELECT) {
4438 compat_ssize_t clen;
4439
4440 if (len > 1)
4441 return -EINVAL;
4442 if (!access_ok(uiov, sizeof(*uiov)))
4443 return -EFAULT;
4444 if (__get_user(clen, &uiov->iov_len))
4445 return -EFAULT;
4446 if (clen < 0)
4447 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004448 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004449 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004450 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004451 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004452 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004453 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004454 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004455 if (ret < 0)
4456 return ret;
4457 }
4458
4459 return 0;
4460}
Jens Axboe03b12302019-12-02 18:50:25 -07004461#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004462
Pavel Begunkov1400e692020-07-12 20:41:05 +03004463static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4464 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004465{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004466 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004467
4468#ifdef CONFIG_COMPAT
4469 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004470 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004471#endif
4472
Pavel Begunkov1400e692020-07-12 20:41:05 +03004473 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004474}
4475
Jens Axboebcda7ba2020-02-23 16:42:51 -07004476static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004477 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004478{
4479 struct io_sr_msg *sr = &req->sr_msg;
4480 struct io_buffer *kbuf;
4481
Jens Axboebcda7ba2020-02-23 16:42:51 -07004482 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4483 if (IS_ERR(kbuf))
4484 return kbuf;
4485
4486 sr->kbuf = kbuf;
4487 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004488 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004489}
4490
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004491static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4492{
4493 return io_put_kbuf(req, req->sr_msg.kbuf);
4494}
4495
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004496static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004497{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004498 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004499
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004500 ret = io_recvmsg_copy_hdr(req, req->async_data);
4501 if (!ret)
4502 req->flags |= REQ_F_NEED_CLEANUP;
4503 return ret;
4504}
4505
4506static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4507{
4508 struct io_sr_msg *sr = &req->sr_msg;
4509
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004510 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4511 return -EINVAL;
4512
Pavel Begunkov270a5942020-07-12 20:41:04 +03004513 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004514 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004515 sr->bgid = READ_ONCE(sqe->buf_group);
Pavel Begunkov04411802021-04-01 15:44:00 +01004516 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
4517 if (sr->msg_flags & MSG_DONTWAIT)
4518 req->flags |= REQ_F_NOWAIT;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004519
Jens Axboed8768362020-02-27 14:17:49 -07004520#ifdef CONFIG_COMPAT
4521 if (req->ctx->compat)
4522 sr->msg_flags |= MSG_CMSG_COMPAT;
4523#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004524 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004525}
4526
Pavel Begunkov889fca72021-02-10 00:03:09 +00004527static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004528{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004529 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004530 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004531 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004532 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004533 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004534 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004535 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004536
Florent Revestdba4a922020-12-04 12:36:04 +01004537 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004538 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004539 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004540
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004541 kmsg = req->async_data;
4542 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004543 ret = io_recvmsg_copy_hdr(req, &iomsg);
4544 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004545 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004546 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004547 }
4548
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004549 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004550 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004551 if (IS_ERR(kbuf))
4552 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004553 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004554 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4555 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004556 1, req->sr_msg.len);
4557 }
4558
Pavel Begunkov04411802021-04-01 15:44:00 +01004559 flags = req->sr_msg.msg_flags;
4560 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004561 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004562 if (flags & MSG_WAITALL)
4563 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4564
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004565 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4566 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004567 if (force_nonblock && ret == -EAGAIN)
4568 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004569 if (ret == -ERESTARTSYS)
4570 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004571
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004572 if (req->flags & REQ_F_BUFFER_SELECTED)
4573 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004574 /* fast path, check for non-NULL to avoid function call */
4575 if (kmsg->free_iov)
4576 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004577 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004578 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004579 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004580 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004581 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004582}
4583
Pavel Begunkov889fca72021-02-10 00:03:09 +00004584static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004585{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004586 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004587 struct io_sr_msg *sr = &req->sr_msg;
4588 struct msghdr msg;
4589 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004590 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004591 struct iovec iov;
4592 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004593 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004594 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004595 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004596
Florent Revestdba4a922020-12-04 12:36:04 +01004597 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004598 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004599 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004600
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004601 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004602 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004603 if (IS_ERR(kbuf))
4604 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004605 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004606 }
4607
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004608 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004609 if (unlikely(ret))
4610 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004611
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004612 msg.msg_name = NULL;
4613 msg.msg_control = NULL;
4614 msg.msg_controllen = 0;
4615 msg.msg_namelen = 0;
4616 msg.msg_iocb = NULL;
4617 msg.msg_flags = 0;
4618
Pavel Begunkov04411802021-04-01 15:44:00 +01004619 flags = req->sr_msg.msg_flags;
4620 if (force_nonblock)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004621 flags |= MSG_DONTWAIT;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004622 if (flags & MSG_WAITALL)
4623 min_ret = iov_iter_count(&msg.msg_iter);
4624
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004625 ret = sock_recvmsg(sock, &msg, flags);
4626 if (force_nonblock && ret == -EAGAIN)
4627 return -EAGAIN;
4628 if (ret == -ERESTARTSYS)
4629 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004630out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004631 if (req->flags & REQ_F_BUFFER_SELECTED)
4632 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01004633 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Jens Axboefddafac2020-01-04 20:19:44 -07004634 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004635 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004636 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004637}
4638
Jens Axboe3529d8c2019-12-19 18:24:38 -07004639static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004640{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004641 struct io_accept *accept = &req->accept;
4642
Jens Axboe14587a462020-09-05 11:36:08 -06004643 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004644 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004645 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004646 return -EINVAL;
4647
Jens Axboed55e5f52019-12-11 16:12:15 -07004648 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4649 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004650 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004651 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004652 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004653}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004654
Pavel Begunkov889fca72021-02-10 00:03:09 +00004655static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004656{
4657 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004658 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004659 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004660 int ret;
4661
Jiufei Xuee697dee2020-06-10 13:41:59 +08004662 if (req->file->f_flags & O_NONBLOCK)
4663 req->flags |= REQ_F_NOWAIT;
4664
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004665 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004666 accept->addr_len, accept->flags,
4667 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004668 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004669 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004670 if (ret < 0) {
4671 if (ret == -ERESTARTSYS)
4672 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004673 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004674 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004675 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004676 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004677}
4678
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004679static int io_connect_prep_async(struct io_kiocb *req)
4680{
4681 struct io_async_connect *io = req->async_data;
4682 struct io_connect *conn = &req->connect;
4683
4684 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4685}
4686
Jens Axboe3529d8c2019-12-19 18:24:38 -07004687static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004688{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004689 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004690
Jens Axboe14587a462020-09-05 11:36:08 -06004691 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004692 return -EINVAL;
4693 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4694 return -EINVAL;
4695
Jens Axboe3529d8c2019-12-19 18:24:38 -07004696 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4697 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004698 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004699}
4700
Pavel Begunkov889fca72021-02-10 00:03:09 +00004701static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004702{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004703 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004704 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004705 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004706 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004707
Jens Axboee8c2bc12020-08-15 18:44:09 -07004708 if (req->async_data) {
4709 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004710 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004711 ret = move_addr_to_kernel(req->connect.addr,
4712 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004713 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004714 if (ret)
4715 goto out;
4716 io = &__io;
4717 }
4718
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004719 file_flags = force_nonblock ? O_NONBLOCK : 0;
4720
Jens Axboee8c2bc12020-08-15 18:44:09 -07004721 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004722 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004723 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004724 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004725 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004726 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004727 ret = -ENOMEM;
4728 goto out;
4729 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004730 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004731 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004732 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004733 if (ret == -ERESTARTSYS)
4734 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004735out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004736 if (ret < 0)
4737 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004738 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004739 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004740}
YueHaibing469956e2020-03-04 15:53:52 +08004741#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004742#define IO_NETOP_FN(op) \
4743static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4744{ \
4745 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004746}
4747
Jens Axboe99a10082021-02-19 09:35:19 -07004748#define IO_NETOP_PREP(op) \
4749IO_NETOP_FN(op) \
4750static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4751{ \
4752 return -EOPNOTSUPP; \
4753} \
4754
4755#define IO_NETOP_PREP_ASYNC(op) \
4756IO_NETOP_PREP(op) \
4757static int io_##op##_prep_async(struct io_kiocb *req) \
4758{ \
4759 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004760}
4761
Jens Axboe99a10082021-02-19 09:35:19 -07004762IO_NETOP_PREP_ASYNC(sendmsg);
4763IO_NETOP_PREP_ASYNC(recvmsg);
4764IO_NETOP_PREP_ASYNC(connect);
4765IO_NETOP_PREP(accept);
4766IO_NETOP_FN(send);
4767IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004768#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004769
Jens Axboed7718a92020-02-14 22:23:12 -07004770struct io_poll_table {
4771 struct poll_table_struct pt;
4772 struct io_kiocb *req;
4773 int error;
4774};
4775
Jens Axboed7718a92020-02-14 22:23:12 -07004776static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4777 __poll_t mask, task_work_func_t func)
4778{
Jens Axboeaa96bf82020-04-03 11:26:26 -06004779 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004780
4781 /* for instances that support it check for an event match first: */
4782 if (mask && !(mask & poll->events))
4783 return 0;
4784
4785 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4786
4787 list_del_init(&poll->wait.entry);
4788
Jens Axboed7718a92020-02-14 22:23:12 -07004789 req->result = mask;
Jens Axboe7cbf1722021-02-10 00:03:20 +00004790 req->task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004791
Jens Axboed7718a92020-02-14 22:23:12 -07004792 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004793 * If this fails, then the task is exiting. When a task exits, the
4794 * work gets canceled, so just cancel this request as well instead
4795 * of executing it. We can't safely execute it anyway, as we may not
4796 * have the needed state needed for it anyway.
Jens Axboed7718a92020-02-14 22:23:12 -07004797 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06004798 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004799 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004800 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00004801 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004802 }
Jens Axboed7718a92020-02-14 22:23:12 -07004803 return 1;
4804}
4805
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004806static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4807 __acquires(&req->ctx->completion_lock)
4808{
4809 struct io_ring_ctx *ctx = req->ctx;
4810
4811 if (!req->result && !READ_ONCE(poll->canceled)) {
4812 struct poll_table_struct pt = { ._key = poll->events };
4813
4814 req->result = vfs_poll(req->file, &pt) & poll->events;
4815 }
4816
4817 spin_lock_irq(&ctx->completion_lock);
4818 if (!req->result && !READ_ONCE(poll->canceled)) {
4819 add_wait_queue(poll->head, &poll->wait);
4820 return true;
4821 }
4822
4823 return false;
4824}
4825
Jens Axboed4e7cd32020-08-15 11:44:50 -07004826static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004827{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004828 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004829 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004830 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004831 return req->apoll->double_poll;
4832}
4833
4834static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4835{
4836 if (req->opcode == IORING_OP_POLL_ADD)
4837 return &req->poll;
4838 return &req->apoll->poll;
4839}
4840
4841static void io_poll_remove_double(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004842 __must_hold(&req->ctx->completion_lock)
Jens Axboed4e7cd32020-08-15 11:44:50 -07004843{
4844 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004845
4846 lockdep_assert_held(&req->ctx->completion_lock);
4847
4848 if (poll && poll->head) {
4849 struct wait_queue_head *head = poll->head;
4850
4851 spin_lock(&head->lock);
4852 list_del_init(&poll->wait.entry);
4853 if (poll->wait.private)
Jens Axboede9b4cc2021-02-24 13:28:27 -07004854 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004855 poll->head = NULL;
4856 spin_unlock(&head->lock);
4857 }
4858}
4859
Pavel Begunkove27414b2021-04-09 09:13:20 +01004860static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
Pavel Begunkove07785b2021-04-01 15:43:57 +01004861 __must_hold(&req->ctx->completion_lock)
Jens Axboe18bceab2020-05-15 11:56:54 -06004862{
4863 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004864 unsigned flags = IORING_CQE_F_MORE;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004865 int error;
Jens Axboe18bceab2020-05-15 11:56:54 -06004866
Pavel Begunkove27414b2021-04-09 09:13:20 +01004867 if (READ_ONCE(req->poll.canceled)) {
Jens Axboe45ab03b2021-02-23 08:19:33 -07004868 error = -ECANCELED;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004869 req->poll.events |= EPOLLONESHOT;
Pavel Begunkove27414b2021-04-09 09:13:20 +01004870 } else {
Jens Axboe50826202021-02-23 09:02:26 -07004871 error = mangle_poll(mask);
Pavel Begunkove27414b2021-04-09 09:13:20 +01004872 }
Jens Axboeb69de282021-03-17 08:37:41 -06004873 if (req->poll.events & EPOLLONESHOT)
4874 flags = 0;
Pavel Begunkovff6421642021-04-11 01:46:32 +01004875 if (!io_cqring_fill_event(req, error, flags)) {
Jens Axboe50826202021-02-23 09:02:26 -07004876 io_poll_remove_waitqs(req);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004877 req->poll.done = true;
4878 flags = 0;
4879 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004880 io_commit_cqring(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004881 return !(flags & IORING_CQE_F_MORE);
Jens Axboe18bceab2020-05-15 11:56:54 -06004882}
4883
Jens Axboe18bceab2020-05-15 11:56:54 -06004884static void io_poll_task_func(struct callback_head *cb)
4885{
4886 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004887 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004888 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004889
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004890 if (io_poll_rewait(req, &req->poll)) {
4891 spin_unlock_irq(&ctx->completion_lock);
4892 } else {
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004893 bool done;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004894
Pavel Begunkove27414b2021-04-09 09:13:20 +01004895 done = io_poll_complete(req, req->result);
Jens Axboe88e41cf2021-02-22 22:08:01 -07004896 if (done) {
4897 hash_del(&req->hash_node);
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004898 } else {
Jens Axboe88e41cf2021-02-22 22:08:01 -07004899 req->result = 0;
4900 add_wait_queue(req->poll.head, &req->poll.wait);
4901 }
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004902 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovf40b9642021-04-09 09:13:19 +01004903 io_cqring_ev_posted(ctx);
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004904
Jens Axboe88e41cf2021-02-22 22:08:01 -07004905 if (done) {
4906 nxt = io_put_req_find_next(req);
4907 if (nxt)
4908 __io_req_task_submit(nxt);
4909 }
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004910 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004911}
4912
4913static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4914 int sync, void *key)
4915{
4916 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004917 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004918 __poll_t mask = key_to_poll(key);
4919
4920 /* for instances that support it check for an event match first: */
4921 if (mask && !(mask & poll->events))
4922 return 0;
Jens Axboe88e41cf2021-02-22 22:08:01 -07004923 if (!(poll->events & EPOLLONESHOT))
4924 return poll->wait.func(&poll->wait, mode, sync, key);
Jens Axboe18bceab2020-05-15 11:56:54 -06004925
Jens Axboe8706e042020-09-28 08:38:54 -06004926 list_del_init(&wait->entry);
4927
Jens Axboe807abcb2020-07-17 17:09:27 -06004928 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004929 bool done;
4930
Jens Axboe807abcb2020-07-17 17:09:27 -06004931 spin_lock(&poll->head->lock);
4932 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004933 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004934 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004935 /* make sure double remove sees this as being gone */
4936 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06004937 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06004938 if (!done) {
4939 /* use wait func handler, so it matches the rq type */
4940 poll->wait.func(&poll->wait, mode, sync, key);
4941 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004942 }
Jens Axboede9b4cc2021-02-24 13:28:27 -07004943 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004944 return 1;
4945}
4946
4947static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4948 wait_queue_func_t wake_func)
4949{
4950 poll->head = NULL;
4951 poll->done = false;
4952 poll->canceled = false;
Jens Axboe464dca62021-03-19 14:06:24 -06004953#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
4954 /* mask in events that we always want/need */
4955 poll->events = events | IO_POLL_UNMASK;
Jens Axboe18bceab2020-05-15 11:56:54 -06004956 INIT_LIST_HEAD(&poll->wait.entry);
4957 init_waitqueue_func_entry(&poll->wait, wake_func);
4958}
4959
4960static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06004961 struct wait_queue_head *head,
4962 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06004963{
4964 struct io_kiocb *req = pt->req;
4965
4966 /*
4967 * If poll->head is already set, it's because the file being polled
4968 * uses multiple waitqueues for poll handling (eg one for read, one
4969 * for write). Setup a separate io_poll_iocb if this happens.
4970 */
4971 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01004972 struct io_poll_iocb *poll_one = poll;
4973
Jens Axboe18bceab2020-05-15 11:56:54 -06004974 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06004975 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004976 pt->error = -EINVAL;
4977 return;
4978 }
Jens Axboe1c3b3e62021-02-28 16:07:30 -07004979 /* double add on the same waitqueue head, ignore */
4980 if (poll->head == head)
4981 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06004982 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4983 if (!poll) {
4984 pt->error = -ENOMEM;
4985 return;
4986 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01004987 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboede9b4cc2021-02-24 13:28:27 -07004988 req_ref_get(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004989 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06004990 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06004991 }
4992
4993 pt->error = 0;
4994 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08004995
4996 if (poll->events & EPOLLEXCLUSIVE)
4997 add_wait_queue_exclusive(head, &poll->wait);
4998 else
4999 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005000}
5001
5002static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5003 struct poll_table_struct *p)
5004{
5005 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005006 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005007
Jens Axboe807abcb2020-07-17 17:09:27 -06005008 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005009}
5010
Jens Axboed7718a92020-02-14 22:23:12 -07005011static void io_async_task_func(struct callback_head *cb)
5012{
5013 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5014 struct async_poll *apoll = req->apoll;
5015 struct io_ring_ctx *ctx = req->ctx;
5016
5017 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5018
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005019 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005020 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005021 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005022 }
5023
Pavel Begunkov0ea13b42021-04-09 09:13:21 +01005024 hash_del(&req->hash_node);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005025 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005026 spin_unlock_irq(&ctx->completion_lock);
5027
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005028 if (!READ_ONCE(apoll->poll.canceled))
5029 __io_req_task_submit(req);
5030 else
Pavel Begunkov25935532021-03-19 17:22:40 +00005031 io_req_complete_failed(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005032
Jens Axboe807abcb2020-07-17 17:09:27 -06005033 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005034 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005035}
5036
5037static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5038 void *key)
5039{
5040 struct io_kiocb *req = wait->private;
5041 struct io_poll_iocb *poll = &req->apoll->poll;
5042
5043 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5044 key_to_poll(key));
5045
5046 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5047}
5048
5049static void io_poll_req_insert(struct io_kiocb *req)
5050{
5051 struct io_ring_ctx *ctx = req->ctx;
5052 struct hlist_head *list;
5053
5054 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5055 hlist_add_head(&req->hash_node, list);
5056}
5057
5058static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5059 struct io_poll_iocb *poll,
5060 struct io_poll_table *ipt, __poll_t mask,
5061 wait_queue_func_t wake_func)
5062 __acquires(&ctx->completion_lock)
5063{
5064 struct io_ring_ctx *ctx = req->ctx;
5065 bool cancel = false;
5066
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005067 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005068 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005069 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005070 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005071
5072 ipt->pt._key = mask;
5073 ipt->req = req;
5074 ipt->error = -EINVAL;
5075
Jens Axboed7718a92020-02-14 22:23:12 -07005076 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5077
5078 spin_lock_irq(&ctx->completion_lock);
5079 if (likely(poll->head)) {
5080 spin_lock(&poll->head->lock);
5081 if (unlikely(list_empty(&poll->wait.entry))) {
5082 if (ipt->error)
5083 cancel = true;
5084 ipt->error = 0;
5085 mask = 0;
5086 }
Jens Axboe88e41cf2021-02-22 22:08:01 -07005087 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
Jens Axboed7718a92020-02-14 22:23:12 -07005088 list_del_init(&poll->wait.entry);
5089 else if (cancel)
5090 WRITE_ONCE(poll->canceled, true);
5091 else if (!poll->done) /* actually waiting for an event */
5092 io_poll_req_insert(req);
5093 spin_unlock(&poll->head->lock);
5094 }
5095
5096 return mask;
5097}
5098
5099static bool io_arm_poll_handler(struct io_kiocb *req)
5100{
5101 const struct io_op_def *def = &io_op_defs[req->opcode];
5102 struct io_ring_ctx *ctx = req->ctx;
5103 struct async_poll *apoll;
5104 struct io_poll_table ipt;
5105 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005106 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005107
5108 if (!req->file || !file_can_poll(req->file))
5109 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005110 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005111 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005112 if (def->pollin)
5113 rw = READ;
5114 else if (def->pollout)
5115 rw = WRITE;
5116 else
5117 return false;
5118 /* if we can't nonblock try, then no point in arming a poll handler */
Jens Axboe7b29f922021-03-12 08:30:14 -07005119 if (!io_file_supports_async(req, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005120 return false;
5121
5122 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5123 if (unlikely(!apoll))
5124 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005125 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005126
5127 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005128 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005129
Jens Axboe88e41cf2021-02-22 22:08:01 -07005130 mask = EPOLLONESHOT;
Jens Axboed7718a92020-02-14 22:23:12 -07005131 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005132 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005133 if (def->pollout)
5134 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005135
5136 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5137 if ((req->opcode == IORING_OP_RECVMSG) &&
5138 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5139 mask &= ~POLLIN;
5140
Jens Axboed7718a92020-02-14 22:23:12 -07005141 mask |= POLLERR | POLLPRI;
5142
5143 ipt.pt._qproc = io_async_queue_proc;
5144
5145 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5146 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005147 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005148 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005149 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005150 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005151 kfree(apoll);
5152 return false;
5153 }
5154 spin_unlock_irq(&ctx->completion_lock);
5155 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5156 apoll->poll.events);
5157 return true;
5158}
5159
5160static bool __io_poll_remove_one(struct io_kiocb *req,
Jens Axboeb2e720a2021-03-31 09:03:03 -06005161 struct io_poll_iocb *poll, bool do_cancel)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005162 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005163{
Jens Axboeb41e9852020-02-17 09:52:41 -07005164 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005165
Jens Axboe50826202021-02-23 09:02:26 -07005166 if (!poll->head)
5167 return false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005168 spin_lock(&poll->head->lock);
Jens Axboeb2e720a2021-03-31 09:03:03 -06005169 if (do_cancel)
5170 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005171 if (!list_empty(&poll->wait.entry)) {
5172 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005173 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005174 }
5175 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005176 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005177 return do_complete;
5178}
5179
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005180static bool io_poll_remove_waitqs(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005181 __must_hold(&req->ctx->completion_lock)
Jens Axboed7718a92020-02-14 22:23:12 -07005182{
5183 bool do_complete;
5184
Jens Axboed4e7cd32020-08-15 11:44:50 -07005185 io_poll_remove_double(req);
Pavel Begunkove31001a2021-04-13 02:58:43 +01005186 do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
Jens Axboed4e7cd32020-08-15 11:44:50 -07005187
Pavel Begunkove31001a2021-04-13 02:58:43 +01005188 if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005189 struct async_poll *apoll = req->apoll;
5190
Jens Axboed7718a92020-02-14 22:23:12 -07005191 /* non-poll requests have submit ref still */
Pavel Begunkove31001a2021-04-13 02:58:43 +01005192 req_ref_put(req);
5193 kfree(apoll->double_poll);
5194 kfree(apoll);
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005195 }
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005196 return do_complete;
5197}
5198
5199static bool io_poll_remove_one(struct io_kiocb *req)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005200 __must_hold(&req->ctx->completion_lock)
Jens Axboeb2c3f7e2021-02-23 08:58:04 -07005201{
5202 bool do_complete;
5203
5204 do_complete = io_poll_remove_waitqs(req);
Jens Axboeb41e9852020-02-17 09:52:41 -07005205 if (do_complete) {
Pavel Begunkovff6421642021-04-11 01:46:32 +01005206 io_cqring_fill_event(req, -ECANCELED, 0);
Jens Axboeb41e9852020-02-17 09:52:41 -07005207 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005208 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005209 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005210 }
5211
5212 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005213}
5214
Jens Axboe76e1b642020-09-26 15:05:03 -06005215/*
5216 * Returns true if we found and killed one or more poll requests
5217 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005218static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5219 struct files_struct *files)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005220{
Jens Axboe78076bb2019-12-04 19:56:40 -07005221 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005222 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005223 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005224
5225 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005226 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5227 struct hlist_head *list;
5228
5229 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005230 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00005231 if (io_match_task(req, tsk, files))
Jens Axboef3606e32020-09-22 08:18:24 -06005232 posted += io_poll_remove_one(req);
5233 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005234 }
5235 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005236
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005237 if (posted)
5238 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005239
5240 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005241}
5242
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005243static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
5244 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005245 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005246{
Jens Axboe78076bb2019-12-04 19:56:40 -07005247 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005248 struct io_kiocb *req;
5249
Jens Axboe78076bb2019-12-04 19:56:40 -07005250 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5251 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005252 if (sqe_addr != req->user_data)
5253 continue;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005254 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
5255 continue;
Jens Axboeb2cb8052021-03-17 08:17:19 -06005256 return req;
Jens Axboe47f46762019-11-09 17:43:02 -07005257 }
Jens Axboeb2cb8052021-03-17 08:17:19 -06005258 return NULL;
5259}
5260
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005261static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
5262 bool poll_only)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005263 __must_hold(&ctx->completion_lock)
Jens Axboeb2cb8052021-03-17 08:17:19 -06005264{
5265 struct io_kiocb *req;
5266
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005267 req = io_poll_find(ctx, sqe_addr, poll_only);
Jens Axboeb2cb8052021-03-17 08:17:19 -06005268 if (!req)
5269 return -ENOENT;
5270 if (io_poll_remove_one(req))
5271 return 0;
5272
5273 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005274}
5275
Pavel Begunkov9096af32021-04-14 13:38:36 +01005276static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5277 unsigned int flags)
5278{
5279 u32 events;
5280
5281 events = READ_ONCE(sqe->poll32_events);
5282#ifdef __BIG_ENDIAN
5283 events = swahw32(events);
5284#endif
5285 if (!(flags & IORING_POLL_ADD_MULTI))
5286 events |= EPOLLONESHOT;
5287 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
5288}
5289
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005290static int io_poll_update_prep(struct io_kiocb *req,
Jens Axboe3529d8c2019-12-19 18:24:38 -07005291 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005292{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005293 struct io_poll_update *upd = &req->poll_update;
5294 u32 flags;
5295
Jens Axboe221c5eb2019-01-17 09:41:58 -07005296 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5297 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005298 if (sqe->ioprio || sqe->buf_index)
5299 return -EINVAL;
5300 flags = READ_ONCE(sqe->len);
5301 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
5302 IORING_POLL_ADD_MULTI))
5303 return -EINVAL;
5304 /* meaningless without update */
5305 if (flags == IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005306 return -EINVAL;
5307
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005308 upd->old_user_data = READ_ONCE(sqe->addr);
5309 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
5310 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
Jens Axboe0969e782019-12-17 18:40:57 -07005311
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005312 upd->new_user_data = READ_ONCE(sqe->off);
5313 if (!upd->update_user_data && upd->new_user_data)
5314 return -EINVAL;
5315 if (upd->update_events)
5316 upd->events = io_poll_parse_events(sqe, flags);
5317 else if (sqe->poll32_events)
5318 return -EINVAL;
Jens Axboe0969e782019-12-17 18:40:57 -07005319
Jens Axboe221c5eb2019-01-17 09:41:58 -07005320 return 0;
5321}
5322
Jens Axboe221c5eb2019-01-17 09:41:58 -07005323static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5324 void *key)
5325{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005326 struct io_kiocb *req = wait->private;
5327 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005328
Jens Axboed7718a92020-02-14 22:23:12 -07005329 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005330}
5331
Jens Axboe221c5eb2019-01-17 09:41:58 -07005332static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5333 struct poll_table_struct *p)
5334{
5335 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5336
Jens Axboee8c2bc12020-08-15 18:44:09 -07005337 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005338}
5339
Jens Axboe3529d8c2019-12-19 18:24:38 -07005340static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005341{
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005342 struct io_poll_iocb *poll = &req->poll;
5343 u32 flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005344
5345 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5346 return -EINVAL;
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005347 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
Jens Axboe88e41cf2021-02-22 22:08:01 -07005348 return -EINVAL;
5349 flags = READ_ONCE(sqe->len);
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005350 if (flags & ~IORING_POLL_ADD_MULTI)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005351 return -EINVAL;
Pavel Begunkov9096af32021-04-14 13:38:36 +01005352
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005353 poll->events = io_poll_parse_events(sqe, flags);
Jens Axboe0969e782019-12-17 18:40:57 -07005354 return 0;
5355}
5356
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005357static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005358{
5359 struct io_poll_iocb *poll = &req->poll;
5360 struct io_ring_ctx *ctx = req->ctx;
5361 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005362 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005363
Jens Axboed7718a92020-02-14 22:23:12 -07005364 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005365
Jens Axboed7718a92020-02-14 22:23:12 -07005366 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5367 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005368
Jens Axboe8c838782019-03-12 15:48:16 -06005369 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005370 ipt.error = 0;
Pavel Begunkove27414b2021-04-09 09:13:20 +01005371 io_poll_complete(req, mask);
Jens Axboe8c838782019-03-12 15:48:16 -06005372 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005373 spin_unlock_irq(&ctx->completion_lock);
5374
Jens Axboe8c838782019-03-12 15:48:16 -06005375 if (mask) {
5376 io_cqring_ev_posted(ctx);
Jens Axboe88e41cf2021-02-22 22:08:01 -07005377 if (poll->events & EPOLLONESHOT)
5378 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005379 }
Jens Axboe8c838782019-03-12 15:48:16 -06005380 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005381}
5382
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005383static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb69de282021-03-17 08:37:41 -06005384{
5385 struct io_ring_ctx *ctx = req->ctx;
5386 struct io_kiocb *preq;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005387 bool completing;
Jens Axboeb69de282021-03-17 08:37:41 -06005388 int ret;
5389
5390 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005391 preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
Jens Axboeb69de282021-03-17 08:37:41 -06005392 if (!preq) {
5393 ret = -ENOENT;
5394 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005395 }
Jens Axboecb3b200e2021-04-06 09:49:31 -06005396
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005397 if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
5398 completing = true;
5399 ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
5400 goto err;
5401 }
5402
Jens Axboecb3b200e2021-04-06 09:49:31 -06005403 /*
5404 * Don't allow racy completion with singleshot, as we cannot safely
5405 * update those. For multishot, if we're racing with completion, just
5406 * let completion re-add it.
5407 */
5408 completing = !__io_poll_remove_one(preq, &preq->poll, false);
5409 if (completing && (preq->poll.events & EPOLLONESHOT)) {
5410 ret = -EALREADY;
5411 goto err;
Jens Axboeb69de282021-03-17 08:37:41 -06005412 }
5413 /* we now have a detached poll request. reissue. */
5414 ret = 0;
5415err:
Jens Axboeb69de282021-03-17 08:37:41 -06005416 if (ret < 0) {
Jens Axboecb3b200e2021-04-06 09:49:31 -06005417 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb69de282021-03-17 08:37:41 -06005418 req_set_fail_links(req);
5419 io_req_complete(req, ret);
5420 return 0;
5421 }
5422 /* only mask one event flags, keep behavior flags */
Pavel Begunkov9d805892021-04-13 02:58:40 +01005423 if (req->poll_update.update_events) {
Jens Axboeb69de282021-03-17 08:37:41 -06005424 preq->poll.events &= ~0xffff;
Pavel Begunkov9d805892021-04-13 02:58:40 +01005425 preq->poll.events |= req->poll_update.events & 0xffff;
Jens Axboeb69de282021-03-17 08:37:41 -06005426 preq->poll.events |= IO_POLL_UNMASK;
5427 }
Pavel Begunkov9d805892021-04-13 02:58:40 +01005428 if (req->poll_update.update_user_data)
5429 preq->user_data = req->poll_update.new_user_data;
Jens Axboecb3b200e2021-04-06 09:49:31 -06005430 spin_unlock_irq(&ctx->completion_lock);
5431
Jens Axboeb69de282021-03-17 08:37:41 -06005432 /* complete update request, we're done with it */
5433 io_req_complete(req, ret);
5434
Jens Axboecb3b200e2021-04-06 09:49:31 -06005435 if (!completing) {
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005436 ret = io_poll_add(preq, issue_flags);
Jens Axboecb3b200e2021-04-06 09:49:31 -06005437 if (ret < 0) {
5438 req_set_fail_links(preq);
5439 io_req_complete(preq, ret);
5440 }
Jens Axboeb69de282021-03-17 08:37:41 -06005441 }
5442 return 0;
5443}
5444
Jens Axboe5262f562019-09-17 12:26:57 -06005445static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5446{
Jens Axboead8a48a2019-11-15 08:49:11 -07005447 struct io_timeout_data *data = container_of(timer,
5448 struct io_timeout_data, timer);
5449 struct io_kiocb *req = data->req;
5450 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005451 unsigned long flags;
5452
Jens Axboe5262f562019-09-17 12:26:57 -06005453 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005454 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005455 atomic_set(&req->ctx->cq_timeouts,
5456 atomic_read(&req->ctx->cq_timeouts) + 1);
5457
Pavel Begunkovff6421642021-04-11 01:46:32 +01005458 io_cqring_fill_event(req, -ETIME, 0);
Jens Axboe5262f562019-09-17 12:26:57 -06005459 io_commit_cqring(ctx);
5460 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5461
5462 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005463 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005464 io_put_req(req);
5465 return HRTIMER_NORESTART;
5466}
5467
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005468static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5469 __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005470 __must_hold(&ctx->completion_lock)
Jens Axboe47f46762019-11-09 17:43:02 -07005471{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005472 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005473 struct io_kiocb *req;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005474 bool found = false;
Jens Axboef254ac02020-08-12 17:33:30 -06005475
5476 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005477 found = user_data == req->user_data;
5478 if (found)
Jens Axboef254ac02020-08-12 17:33:30 -06005479 break;
Jens Axboef254ac02020-08-12 17:33:30 -06005480 }
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005481 if (!found)
5482 return ERR_PTR(-ENOENT);
Jens Axboef254ac02020-08-12 17:33:30 -06005483
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005484 io = req->async_data;
Pavel Begunkovfd9c7bc2021-04-13 02:58:42 +01005485 if (hrtimer_try_to_cancel(&io->timer) == -1)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005486 return ERR_PTR(-EALREADY);
5487 list_del_init(&req->timeout.list);
5488 return req;
5489}
5490
5491static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005492 __must_hold(&ctx->completion_lock)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005493{
5494 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5495
5496 if (IS_ERR(req))
5497 return PTR_ERR(req);
5498
5499 req_set_fail_links(req);
Pavel Begunkovff6421642021-04-11 01:46:32 +01005500 io_cqring_fill_event(req, -ECANCELED, 0);
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005501 io_put_req_deferred(req, 1);
5502 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005503}
5504
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005505static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5506 struct timespec64 *ts, enum hrtimer_mode mode)
Pavel Begunkove07785b2021-04-01 15:43:57 +01005507 __must_hold(&ctx->completion_lock)
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005508{
5509 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5510 struct io_timeout_data *data;
5511
5512 if (IS_ERR(req))
5513 return PTR_ERR(req);
5514
5515 req->timeout.off = 0; /* noseq */
5516 data = req->async_data;
5517 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5518 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5519 data->timer.function = io_timeout_fn;
5520 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5521 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005522}
5523
Jens Axboe3529d8c2019-12-19 18:24:38 -07005524static int io_timeout_remove_prep(struct io_kiocb *req,
5525 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005526{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005527 struct io_timeout_rem *tr = &req->timeout_rem;
5528
Jens Axboeb29472e2019-12-17 18:50:29 -07005529 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5530 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005531 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5532 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005533 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005534 return -EINVAL;
5535
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005536 tr->addr = READ_ONCE(sqe->addr);
5537 tr->flags = READ_ONCE(sqe->timeout_flags);
5538 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5539 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5540 return -EINVAL;
5541 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5542 return -EFAULT;
5543 } else if (tr->flags) {
5544 /* timeout removal doesn't support flags */
5545 return -EINVAL;
5546 }
5547
Jens Axboeb29472e2019-12-17 18:50:29 -07005548 return 0;
5549}
5550
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005551static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5552{
5553 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5554 : HRTIMER_MODE_REL;
5555}
5556
Jens Axboe11365042019-10-16 09:08:32 -06005557/*
5558 * Remove or update an existing timeout command
5559 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005560static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005561{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005562 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005563 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005564 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005565
Jens Axboe11365042019-10-16 09:08:32 -06005566 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005567 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005568 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005569 else
5570 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5571 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005572
Pavel Begunkovff6421642021-04-11 01:46:32 +01005573 io_cqring_fill_event(req, ret, 0);
Jens Axboe11365042019-10-16 09:08:32 -06005574 io_commit_cqring(ctx);
5575 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005576 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005577 if (ret < 0)
5578 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005579 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005580 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005581}
5582
Jens Axboe3529d8c2019-12-19 18:24:38 -07005583static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005584 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005585{
Jens Axboead8a48a2019-11-15 08:49:11 -07005586 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005587 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005588 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005589
Jens Axboead8a48a2019-11-15 08:49:11 -07005590 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005591 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005592 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005593 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005594 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005595 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005596 flags = READ_ONCE(sqe->timeout_flags);
5597 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005598 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005599
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005600 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005601
Jens Axboee8c2bc12020-08-15 18:44:09 -07005602 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005603 return -ENOMEM;
5604
Jens Axboee8c2bc12020-08-15 18:44:09 -07005605 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005606 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005607
5608 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005609 return -EFAULT;
5610
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005611 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005612 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkov2482b582021-03-25 18:32:44 +00005613 if (is_timeout_link)
5614 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005615 return 0;
5616}
5617
Pavel Begunkov61e98202021-02-10 00:03:08 +00005618static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005619{
Jens Axboead8a48a2019-11-15 08:49:11 -07005620 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005621 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005622 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005623 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005624
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005625 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005626
Jens Axboe5262f562019-09-17 12:26:57 -06005627 /*
5628 * sqe->off holds how many events that need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005629 * timeout event to be satisfied. If it isn't set, then this is
5630 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005631 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005632 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005633 entry = ctx->timeout_list.prev;
5634 goto add;
5635 }
Jens Axboe5262f562019-09-17 12:26:57 -06005636
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005637 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5638 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005639
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005640 /* Update the last seq here in case io_flush_timeouts() hasn't.
5641 * This is safe because ->completion_lock is held, and submissions
5642 * and completions are never mixed in the same ->completion_lock section.
5643 */
5644 ctx->cq_last_tm_flush = tail;
5645
Jens Axboe5262f562019-09-17 12:26:57 -06005646 /*
5647 * Insertion sort, ensuring the first entry in the list is always
5648 * the one we need first.
5649 */
Jens Axboe5262f562019-09-17 12:26:57 -06005650 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005651 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5652 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005653
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005654 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005655 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005656 /* nxt.seq is behind @tail, otherwise would've been completed */
5657 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005658 break;
5659 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005660add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005661 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005662 data->timer.function = io_timeout_fn;
5663 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005664 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005665 return 0;
5666}
5667
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005668struct io_cancel_data {
5669 struct io_ring_ctx *ctx;
5670 u64 user_data;
5671};
5672
Jens Axboe62755e32019-10-28 21:49:21 -06005673static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005674{
Jens Axboe62755e32019-10-28 21:49:21 -06005675 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005676 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005677
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005678 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005679}
5680
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005681static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5682 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005683{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005684 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005685 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005686 int ret = 0;
5687
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005688 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005689 return -ENOENT;
5690
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005691 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005692 switch (cancel_ret) {
5693 case IO_WQ_CANCEL_OK:
5694 ret = 0;
5695 break;
5696 case IO_WQ_CANCEL_RUNNING:
5697 ret = -EALREADY;
5698 break;
5699 case IO_WQ_CANCEL_NOTFOUND:
5700 ret = -ENOENT;
5701 break;
5702 }
5703
Jens Axboee977d6d2019-11-05 12:39:45 -07005704 return ret;
5705}
5706
Jens Axboe47f46762019-11-09 17:43:02 -07005707static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5708 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005709 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005710{
5711 unsigned long flags;
5712 int ret;
5713
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005714 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005715 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01005716 if (ret != -ENOENT)
5717 goto done;
Jens Axboe47f46762019-11-09 17:43:02 -07005718 ret = io_timeout_cancel(ctx, sqe_addr);
5719 if (ret != -ENOENT)
5720 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005721 ret = io_poll_cancel(ctx, sqe_addr, false);
Jens Axboe47f46762019-11-09 17:43:02 -07005722done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005723 if (!ret)
5724 ret = success_ret;
Pavel Begunkovff6421642021-04-11 01:46:32 +01005725 io_cqring_fill_event(req, ret, 0);
Jens Axboe47f46762019-11-09 17:43:02 -07005726 io_commit_cqring(ctx);
5727 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5728 io_cqring_ev_posted(ctx);
5729
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005730 if (ret < 0)
5731 req_set_fail_links(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005732}
5733
Jens Axboe3529d8c2019-12-19 18:24:38 -07005734static int io_async_cancel_prep(struct io_kiocb *req,
5735 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005736{
Jens Axboefbf23842019-12-17 18:45:56 -07005737 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005738 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005739 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5740 return -EINVAL;
5741 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005742 return -EINVAL;
5743
Jens Axboefbf23842019-12-17 18:45:56 -07005744 req->cancel.addr = READ_ONCE(sqe->addr);
5745 return 0;
5746}
5747
Pavel Begunkov61e98202021-02-10 00:03:08 +00005748static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005749{
5750 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005751 u64 sqe_addr = req->cancel.addr;
5752 struct io_tctx_node *node;
5753 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005754
Pavel Begunkov58f99372021-03-12 16:25:55 +00005755 /* tasks should wait for their io-wq threads, so safe w/o sync */
5756 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5757 spin_lock_irq(&ctx->completion_lock);
5758 if (ret != -ENOENT)
5759 goto done;
5760 ret = io_timeout_cancel(ctx, sqe_addr);
5761 if (ret != -ENOENT)
5762 goto done;
Pavel Begunkov9ba5fac2021-04-14 13:38:35 +01005763 ret = io_poll_cancel(ctx, sqe_addr, false);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005764 if (ret != -ENOENT)
5765 goto done;
5766 spin_unlock_irq(&ctx->completion_lock);
5767
5768 /* slow path, try all io-wq's */
5769 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5770 ret = -ENOENT;
5771 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5772 struct io_uring_task *tctx = node->task->io_uring;
5773
Pavel Begunkov58f99372021-03-12 16:25:55 +00005774 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5775 if (ret != -ENOENT)
5776 break;
5777 }
5778 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5779
5780 spin_lock_irq(&ctx->completion_lock);
5781done:
Pavel Begunkovff6421642021-04-11 01:46:32 +01005782 io_cqring_fill_event(req, ret, 0);
Pavel Begunkov58f99372021-03-12 16:25:55 +00005783 io_commit_cqring(ctx);
5784 spin_unlock_irq(&ctx->completion_lock);
5785 io_cqring_ev_posted(ctx);
5786
5787 if (ret < 0)
5788 req_set_fail_links(req);
5789 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005790 return 0;
5791}
5792
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005793static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005794 const struct io_uring_sqe *sqe)
5795{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005796 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5797 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005798 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5799 return -EINVAL;
5800 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005801 return -EINVAL;
5802
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005803 req->rsrc_update.offset = READ_ONCE(sqe->off);
5804 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5805 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005806 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005807 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005808 return 0;
5809}
5810
Pavel Begunkov889fca72021-02-10 00:03:09 +00005811static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005812{
5813 struct io_ring_ctx *ctx = req->ctx;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005814 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005815 int ret;
5816
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005817 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005818 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005819
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005820 up.offset = req->rsrc_update.offset;
5821 up.data = req->rsrc_update.arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005822
5823 mutex_lock(&ctx->uring_lock);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005824 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005825 mutex_unlock(&ctx->uring_lock);
5826
5827 if (ret < 0)
5828 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005829 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005830 return 0;
5831}
5832
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005833static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005834{
Jens Axboed625c6e2019-12-17 19:53:05 -07005835 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005836 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005837 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005838 case IORING_OP_READV:
5839 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005840 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005841 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005842 case IORING_OP_WRITEV:
5843 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005844 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005845 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005846 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005847 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005848 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01005849 return io_poll_update_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005850 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005851 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005852 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005853 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005854 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005855 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005856 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005857 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005858 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005859 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005860 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005861 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005862 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005863 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005864 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005865 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005866 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005867 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005868 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005869 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005870 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005871 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005872 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005873 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005874 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005875 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005876 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005877 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005878 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005879 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005880 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005881 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005882 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005883 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005884 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005885 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005886 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005887 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005888 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005889 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005890 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005891 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005892 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005893 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005894 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005895 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005896 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005897 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005898 case IORING_OP_SHUTDOWN:
5899 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005900 case IORING_OP_RENAMEAT:
5901 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005902 case IORING_OP_UNLINKAT:
5903 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005904 }
5905
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005906 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5907 req->opcode);
5908 return-EINVAL;
5909}
5910
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005911static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005912{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005913 if (!io_op_defs[req->opcode].needs_async_setup)
5914 return 0;
5915 if (WARN_ON_ONCE(req->async_data))
5916 return -EFAULT;
5917 if (io_alloc_async_data(req))
5918 return -EAGAIN;
5919
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005920 switch (req->opcode) {
5921 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005922 return io_rw_prep_async(req, READ);
5923 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005924 return io_rw_prep_async(req, WRITE);
5925 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005926 return io_sendmsg_prep_async(req);
5927 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005928 return io_recvmsg_prep_async(req);
5929 case IORING_OP_CONNECT:
5930 return io_connect_prep_async(req);
5931 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005932 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
5933 req->opcode);
5934 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07005935}
5936
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005937static u32 io_get_sequence(struct io_kiocb *req)
5938{
5939 struct io_kiocb *pos;
5940 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005941 u32 total_submitted, nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005942
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005943 io_for_each_link(pos, req)
5944 nr_reqs++;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005945
5946 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5947 return total_submitted - nr_reqs;
5948}
5949
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005950static int io_req_defer(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005951{
5952 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005953 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07005954 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005955 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07005956
5957 /* Still need defer if there is pending req in defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005958 if (likely(list_empty_careful(&ctx->defer_list) &&
5959 !(req->flags & REQ_F_IO_DRAIN)))
5960 return 0;
5961
5962 seq = io_get_sequence(req);
5963 /* Still a chance to pass the sequence check */
5964 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboedef596e2019-01-09 08:59:42 -07005965 return 0;
5966
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005967 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005968 if (ret)
5969 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03005970 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005971 de = kmalloc(sizeof(*de), GFP_KERNEL);
5972 if (!de)
5973 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07005974
5975 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005976 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07005977 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005978 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03005979 io_queue_async_work(req);
5980 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07005981 }
5982
5983 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005984 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005985 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005986 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07005987 spin_unlock_irq(&ctx->completion_lock);
5988 return -EIOCBQUEUED;
5989}
5990
Pavel Begunkov68fb8972021-03-19 17:22:41 +00005991static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005992{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005993 if (req->flags & REQ_F_BUFFER_SELECTED) {
5994 switch (req->opcode) {
5995 case IORING_OP_READV:
5996 case IORING_OP_READ_FIXED:
5997 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005998 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005999 break;
6000 case IORING_OP_RECVMSG:
6001 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07006002 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006003 break;
6004 }
6005 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006006 }
6007
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006008 if (req->flags & REQ_F_NEED_CLEANUP) {
6009 switch (req->opcode) {
6010 case IORING_OP_READV:
6011 case IORING_OP_READ_FIXED:
6012 case IORING_OP_READ:
6013 case IORING_OP_WRITEV:
6014 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006015 case IORING_OP_WRITE: {
6016 struct io_async_rw *io = req->async_data;
6017 if (io->free_iovec)
6018 kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006019 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006020 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006021 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07006022 case IORING_OP_SENDMSG: {
6023 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00006024
6025 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006026 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07006027 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006028 case IORING_OP_SPLICE:
6029 case IORING_OP_TEE:
Pavel Begunkove1d767f2021-03-19 17:22:43 +00006030 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
6031 io_put_file(req->splice.file_in);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006032 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06006033 case IORING_OP_OPENAT:
6034 case IORING_OP_OPENAT2:
6035 if (req->open.filename)
6036 putname(req->open.filename);
6037 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006038 case IORING_OP_RENAMEAT:
6039 putname(req->rename.oldpath);
6040 putname(req->rename.newpath);
6041 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006042 case IORING_OP_UNLINKAT:
6043 putname(req->unlink.filename);
6044 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03006045 }
6046 req->flags &= ~REQ_F_NEED_CLEANUP;
6047 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03006048}
6049
Pavel Begunkov889fca72021-02-10 00:03:09 +00006050static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07006051{
Jens Axboeedafcce2019-01-09 09:16:05 -07006052 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07006053 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07006054 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07006055
Jens Axboe003e8dc2021-03-06 09:22:27 -07006056 if (req->work.creds && req->work.creds != current_cred())
6057 creds = override_creds(req->work.creds);
Jens Axboe5730b272021-02-27 15:57:30 -07006058
Jens Axboed625c6e2019-12-17 19:53:05 -07006059 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07006060 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006061 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006062 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006063 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07006064 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006065 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006066 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006067 break;
6068 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07006069 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07006070 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006071 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006072 break;
6073 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006074 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006075 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006076 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006077 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006078 break;
6079 case IORING_OP_POLL_REMOVE:
Pavel Begunkovc5de0032021-04-14 13:38:37 +01006080 ret = io_poll_update(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006081 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006082 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006083 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006084 break;
6085 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006086 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006087 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006088 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006089 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006090 break;
6091 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006092 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006093 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006094 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006095 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006096 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006097 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006098 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006099 break;
6100 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006101 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006102 break;
6103 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006104 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006105 break;
6106 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006107 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006108 break;
6109 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006110 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006111 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006112 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006113 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006114 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006115 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006116 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006117 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006118 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006119 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006120 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006121 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006122 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006123 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006124 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006125 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006126 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006127 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006128 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006129 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006130 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006131 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006132 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006133 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006134 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006135 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006136 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006137 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006138 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006139 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006140 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006141 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006142 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006143 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006144 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006145 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006146 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006147 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006148 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006149 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006150 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006151 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006152 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006153 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006154 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006155 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006156 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006157 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006158 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006159 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006160 default:
6161 ret = -EINVAL;
6162 break;
6163 }
Jens Axboe31b51512019-01-18 22:56:34 -07006164
Jens Axboe5730b272021-02-27 15:57:30 -07006165 if (creds)
6166 revert_creds(creds);
6167
Jens Axboe2b188cc2019-01-07 10:46:33 -07006168 if (ret)
6169 return ret;
6170
Jens Axboeb5325762020-05-19 21:20:27 -06006171 /* If the op doesn't have a file, we're not polling for it */
6172 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006173 const bool in_async = io_wq_current_is_worker();
6174
Jens Axboe11ba8202020-01-15 21:51:17 -07006175 /* workqueue context doesn't hold uring_lock, grab it now */
6176 if (in_async)
6177 mutex_lock(&ctx->uring_lock);
6178
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08006179 io_iopoll_req_issued(req, in_async);
Jens Axboe11ba8202020-01-15 21:51:17 -07006180
6181 if (in_async)
6182 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006183 }
6184
6185 return 0;
6186}
6187
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006188static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006189{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006190 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006191 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006192 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006193
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006194 timeout = io_prep_linked_timeout(req);
6195 if (timeout)
6196 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006197
Jens Axboe4014d942021-01-19 15:53:54 -07006198 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006199 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006200
Jens Axboe561fb042019-10-24 07:25:42 -06006201 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006202 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006203 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006204 /*
6205 * We can get EAGAIN for polled IO even though we're
6206 * forcing a sync submission from here, since we can't
6207 * wait for request slots on the block side.
6208 */
6209 if (ret != -EAGAIN)
6210 break;
6211 cond_resched();
6212 } while (1);
6213 }
Jens Axboe31b51512019-01-18 22:56:34 -07006214
Pavel Begunkova3df76982021-02-18 22:32:52 +00006215 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006216 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006217 /* io-wq is going to take one down */
Jens Axboede9b4cc2021-02-24 13:28:27 -07006218 req_ref_get(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00006219 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006220 }
Jens Axboe31b51512019-01-18 22:56:34 -07006221}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006222
Jens Axboe7b29f922021-03-12 08:30:14 -07006223#define FFS_ASYNC_READ 0x1UL
6224#define FFS_ASYNC_WRITE 0x2UL
6225#ifdef CONFIG_64BIT
6226#define FFS_ISREG 0x4UL
6227#else
6228#define FFS_ISREG 0x0UL
6229#endif
6230#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
6231
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006232static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table,
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006233 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006234{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006235 struct io_fixed_file *table_l2;
Jens Axboe65e19f52019-10-26 07:20:21 -06006236
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006237 table_l2 = table->files[i >> IORING_FILE_TABLE_SHIFT];
6238 return &table_l2[i & IORING_FILE_TABLE_MASK];
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006239}
6240
6241static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6242 int index)
6243{
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006244 struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
Jens Axboe7b29f922021-03-12 08:30:14 -07006245
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006246 return (struct file *) (slot->file_ptr & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006247}
6248
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006249static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006250{
6251 unsigned long file_ptr = (unsigned long) file;
6252
6253 if (__io_file_supports_async(file, READ))
6254 file_ptr |= FFS_ASYNC_READ;
6255 if (__io_file_supports_async(file, WRITE))
6256 file_ptr |= FFS_ASYNC_WRITE;
6257 if (S_ISREG(file_inode(file)->i_mode))
6258 file_ptr |= FFS_ISREG;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01006259 file_slot->file_ptr = file_ptr;
Pavel Begunkov9a321c92021-04-01 15:44:01 +01006260}
6261
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006262static struct file *io_file_get(struct io_submit_state *state,
6263 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006264{
6265 struct io_ring_ctx *ctx = req->ctx;
6266 struct file *file;
6267
6268 if (fixed) {
Jens Axboe7b29f922021-03-12 08:30:14 -07006269 unsigned long file_ptr;
6270
Pavel Begunkov479f5172020-10-10 18:34:07 +01006271 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006272 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006273 fd = array_index_nospec(fd, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01006274 file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
Jens Axboe7b29f922021-03-12 08:30:14 -07006275 file = (struct file *) (file_ptr & FFS_MASK);
6276 file_ptr &= ~FFS_MASK;
6277 /* mask in overlapping REQ_F and FFS bits */
6278 req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01006279 io_req_set_rsrc_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006280 } else {
6281 trace_io_uring_file_get(ctx, fd);
6282 file = __io_file_get(state, fd);
Jens Axboed44f5542021-03-12 08:27:05 -07006283
6284 /* we don't allow fixed io_uring files */
6285 if (file && unlikely(file->f_op == &io_uring_fops))
6286 io_req_track_inflight(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006287 }
6288
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006289 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006290}
6291
Jens Axboe2665abf2019-11-05 12:40:47 -07006292static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6293{
Jens Axboead8a48a2019-11-15 08:49:11 -07006294 struct io_timeout_data *data = container_of(timer,
6295 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006296 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006297 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006298 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006299
6300 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006301 prev = req->timeout.head;
6302 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006303
6304 /*
6305 * We don't expect the list to be empty, that will only happen if we
6306 * race with the completion of the linked work.
6307 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07006308 if (prev && req_ref_inc_not_zero(prev))
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006309 io_remove_next_linked(prev);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006310 else
6311 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006312 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6313
6314 if (prev) {
Pavel Begunkov014db002020-03-03 21:33:12 +03006315 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006316 io_put_req_deferred(prev, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006317 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006318 io_req_complete_post(req, -ETIME, 0);
Jens Axboe2665abf2019-11-05 12:40:47 -07006319 }
Pavel Begunkovdf9727a2021-04-01 15:43:59 +01006320 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07006321 return HRTIMER_NORESTART;
6322}
6323
Pavel Begunkovde968c12021-03-19 17:22:33 +00006324static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006325{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006326 struct io_ring_ctx *ctx = req->ctx;
6327
6328 spin_lock_irq(&ctx->completion_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006329 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006330 * If the back reference is NULL, then our linked request finished
6331 * before we got a chance to setup the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006332 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006333 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006334 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006335
Jens Axboead8a48a2019-11-15 08:49:11 -07006336 data->timer.function = io_link_timeout_fn;
6337 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6338 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006339 }
Jens Axboe76a46e02019-11-10 23:34:16 -07006340 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006341 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006342 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006343}
6344
Jens Axboead8a48a2019-11-15 08:49:11 -07006345static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006346{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006347 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006348
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006349 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6350 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006351 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006352
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006353 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006354 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006355 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006356 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006357}
6358
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006359static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006360{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006361 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006362 int ret;
6363
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006364 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006365
6366 /*
6367 * We async punt it if the file wasn't marked NOWAIT, or if the file
6368 * doesn't support non-blocking read/write attempts
6369 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006370 if (likely(!ret)) {
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006371 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006372 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006373 struct io_ring_ctx *ctx = req->ctx;
6374 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006375
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006376 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006377 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006378 io_submit_flush_completions(cs, ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006379 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006380 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006381 }
Pavel Begunkov18400382021-03-19 17:22:34 +00006382 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6383 if (!io_arm_poll_handler(req)) {
6384 /*
6385 * Queued up for async execution, worker will release
6386 * submit reference when the iocb is actually submitted.
6387 */
6388 io_queue_async_work(req);
6389 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006390 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006391 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006392 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006393 if (linked_timeout)
6394 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006395}
6396
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006397static void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006398{
6399 int ret;
6400
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006401 ret = io_req_defer(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006402 if (ret) {
6403 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006404fail_req:
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006405 io_req_complete_failed(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006406 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006407 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006408 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006409 if (unlikely(ret))
6410 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07006411 io_queue_async_work(req);
6412 } else {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006413 __io_queue_sqe(req);
Jens Axboece35a472019-12-17 08:04:44 -07006414 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006415}
6416
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006417/*
6418 * Check SQE restrictions (opcode and flags).
6419 *
6420 * Returns 'true' if SQE is allowed, 'false' otherwise.
6421 */
6422static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6423 struct io_kiocb *req,
6424 unsigned int sqe_flags)
6425{
6426 if (!ctx->restricted)
6427 return true;
6428
6429 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6430 return false;
6431
6432 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6433 ctx->restrictions.sqe_flags_required)
6434 return false;
6435
6436 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6437 ctx->restrictions.sqe_flags_required))
6438 return false;
6439
6440 return true;
6441}
6442
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006443static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006444 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006445{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006446 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006447 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006448 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006449
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006450 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006451 /* same numerical values with corresponding REQ_F_*, safe to copy */
6452 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006453 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006454 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006455 req->file = NULL;
6456 req->ctx = ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006457 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006458 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006459 /* one is dropped after submission, the other at completion */
Jens Axboeabc54d62021-02-24 13:32:30 -07006460 atomic_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006461 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006462 req->result = 0;
Jens Axboe93e68e02021-03-09 07:02:21 -07006463 req->work.creds = NULL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006464
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006465 /* enforce forwards compatibility on users */
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006466 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
6467 req->flags = 0;
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006468 return -EINVAL;
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006469 }
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006470
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006471 if (unlikely(req->opcode >= IORING_OP_LAST))
6472 return -EINVAL;
6473
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006474 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6475 return -EACCES;
6476
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006477 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6478 !io_op_defs[req->opcode].buffer_select)
6479 return -EOPNOTSUPP;
6480
Jens Axboe003e8dc2021-03-06 09:22:27 -07006481 personality = READ_ONCE(sqe->personality);
6482 if (personality) {
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00006483 req->work.creds = xa_load(&ctx->personalities, personality);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006484 if (!req->work.creds)
6485 return -EINVAL;
6486 get_cred(req->work.creds);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006487 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006488 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006489
Jens Axboe27926b62020-10-28 09:33:23 -06006490 /*
6491 * Plug now if we have more than 1 IO left after this, and the target
6492 * is potentially a read/write to block based storage.
6493 */
6494 if (!state->plug_started && state->ios_left > 1 &&
6495 io_op_defs[req->opcode].plug) {
6496 blk_start_plug(&state->plug);
6497 state->plug_started = true;
6498 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006499
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006500 if (io_op_defs[req->opcode].needs_file) {
6501 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006502
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006503 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006504 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006505 ret = -EBADF;
6506 }
6507
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006508 state->ios_left--;
6509 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006510}
6511
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006512static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006513 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006514{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006515 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006516 int ret;
6517
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006518 ret = io_init_req(ctx, req, sqe);
6519 if (unlikely(ret)) {
6520fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006521 if (link->head) {
6522 /* fail even hard links since we don't submit */
Pavel Begunkovcf109602021-02-18 18:29:43 +00006523 link->head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006524 io_req_complete_failed(link->head, -ECANCELED);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006525 link->head = NULL;
6526 }
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006527 io_req_complete_failed(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006528 return ret;
6529 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006530 ret = io_req_prep(req, sqe);
6531 if (unlikely(ret))
6532 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006533
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006534 /* don't need @sqe from now on */
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006535 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6536 true, ctx->flags & IORING_SETUP_SQPOLL);
6537
Jens Axboe6c271ce2019-01-10 11:22:30 -07006538 /*
6539 * If we already have a head request, queue this one for async
6540 * submittal once the head completes. If we don't have a head but
6541 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6542 * submitted sync once the chain is complete. If none of those
6543 * conditions are true (normal request), then just queue it.
6544 */
6545 if (link->head) {
6546 struct io_kiocb *head = link->head;
6547
6548 /*
6549 * Taking sequential execution of a link, draining both sides
6550 * of the link also fullfils IOSQE_IO_DRAIN semantics for all
6551 * requests in the link. So, it drains the head and the
6552 * next after the link request. The last one is done via
6553 * drain_next flag to persist the effect across calls.
6554 */
6555 if (req->flags & REQ_F_IO_DRAIN) {
6556 head->flags |= REQ_F_IO_DRAIN;
6557 ctx->drain_next = 1;
6558 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006559 ret = io_req_prep_async(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006560 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006561 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006562 trace_io_uring_link(ctx, req, head);
6563 link->last->link = req;
6564 link->last = req;
6565
6566 /* last request of a link, enqueue the link */
6567 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006568 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006569 link->head = NULL;
6570 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006571 } else {
6572 if (unlikely(ctx->drain_next)) {
6573 req->flags |= REQ_F_IO_DRAIN;
6574 ctx->drain_next = 0;
6575 }
6576 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006577 link->head = req;
6578 link->last = req;
6579 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006580 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006581 }
6582 }
6583
6584 return 0;
6585}
6586
6587/*
6588 * Batched submission is done, ensure local IO is flushed out.
6589 */
6590static void io_submit_state_end(struct io_submit_state *state,
6591 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006592{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006593 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006594 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006595 if (state->comp.nr)
Jens Axboe9e645e112019-05-10 16:07:28 -06006596 io_submit_flush_completions(&state->comp, ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006597 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006598 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006599 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006600}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006601
Jens Axboe9e645e112019-05-10 16:07:28 -06006602/*
6603 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006604 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006605static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006606 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006607{
6608 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006609 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006610 /* set only head, no need to init link_last in advance */
6611 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006612}
6613
Jens Axboe193155c2020-02-22 23:22:19 -07006614static void io_commit_sqring(struct io_ring_ctx *ctx)
6615{
Jens Axboe75c6a032020-01-28 10:15:23 -07006616 struct io_rings *rings = ctx->rings;
6617
6618 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006619 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006620 * since once we write the new head, the application could
6621 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006622 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006623 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006624}
6625
Jens Axboe9e645e112019-05-10 16:07:28 -06006626/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006627 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006628 * that is mapped by userspace. This means that care needs to be taken to
6629 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006630 * being a good citizen. If members of the sqe are validated and then later
6631 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006632 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006633 */
6634static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006635{
6636 u32 *sq_array = ctx->sq_array;
6637 unsigned head;
6638
6639 /*
6640 * The cached sq head (or cq tail) serves two purposes:
6641 *
6642 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006643 * head updates.
Jens Axboe9e645e112019-05-10 16:07:28 -06006644 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006645 * though the application is the one updating it.
6646 */
6647 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
6648 if (likely(head < ctx->sq_entries))
6649 return &ctx->sq_sqes[head];
6650
6651 /* drop invalid entries */
Pavel Begunkov711be032020-01-17 03:57:59 +03006652 ctx->cached_sq_dropped++;
6653 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6654 return NULL;
6655}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006656
Jens Axboe0f212202020-09-13 13:09:39 -06006657static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006658{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006659 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006660
Jens Axboec4a2ed72019-11-21 21:01:26 -07006661 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006662 if (test_bit(0, &ctx->sq_check_overflow)) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00006663 if (!__io_cqring_overflow_flush(ctx, false))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006664 return -EBUSY;
6665 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006666
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006667 /* make sure SQ entry isn't read before tail */
6668 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006669
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006670 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6671 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006672
Jens Axboed8a6df12020-10-15 16:24:45 -06006673 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006674 refcount_add(nr, &current->usage);
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006675 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006676
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006677 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006678 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006679 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006680
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006681 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006682 if (unlikely(!req)) {
6683 if (!submitted)
6684 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006685 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006686 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006687 sqe = io_get_sqe(ctx);
6688 if (unlikely(!sqe)) {
6689 kmem_cache_free(req_cachep, req);
6690 break;
6691 }
Jens Axboed3656342019-12-18 09:50:26 -07006692 /* will complete beyond this point, count as submitted */
6693 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006694 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006695 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006696 }
6697
Pavel Begunkov9466f432020-01-25 22:34:01 +03006698 if (unlikely(submitted != nr)) {
6699 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006700 struct io_uring_task *tctx = current->io_uring;
6701 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006702
Jens Axboed8a6df12020-10-15 16:24:45 -06006703 percpu_ref_put_many(&ctx->refs, unused);
6704 percpu_counter_sub(&tctx->inflight, unused);
6705 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006706 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006707
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006708 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006709 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6710 io_commit_sqring(ctx);
6711
Jens Axboe6c271ce2019-01-10 11:22:30 -07006712 return submitted;
6713}
6714
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006715static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6716{
6717 /* Tell userspace we may need a wakeup call */
6718 spin_lock_irq(&ctx->completion_lock);
6719 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6720 spin_unlock_irq(&ctx->completion_lock);
6721}
6722
6723static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6724{
6725 spin_lock_irq(&ctx->completion_lock);
6726 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6727 spin_unlock_irq(&ctx->completion_lock);
6728}
6729
Xiaoguang Wang08369242020-11-03 14:15:59 +08006730static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006731{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006732 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006733 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006734
Jens Axboec8d1ba52020-09-14 11:07:26 -06006735 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006736 /* if we're handling multiple rings, cap submit size for fairness */
6737 if (cap_entries && to_submit > 8)
6738 to_submit = 8;
6739
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006740 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6741 unsigned nr_events = 0;
6742
Xiaoguang Wang08369242020-11-03 14:15:59 +08006743 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006744 if (!list_empty(&ctx->iopoll_list))
6745 io_do_iopoll(ctx, &nr_events, 0);
6746
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006747 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6748 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006749 ret = io_submit_sqes(ctx, to_submit);
6750 mutex_unlock(&ctx->uring_lock);
6751 }
Jens Axboe90554202020-09-03 12:12:41 -06006752
6753 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6754 wake_up(&ctx->sqo_sq_wait);
6755
Xiaoguang Wang08369242020-11-03 14:15:59 +08006756 return ret;
6757}
6758
6759static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6760{
6761 struct io_ring_ctx *ctx;
6762 unsigned sq_thread_idle = 0;
6763
Pavel Begunkovc9dca272021-03-10 13:13:55 +00006764 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6765 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006766 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006767}
6768
Jens Axboe6c271ce2019-01-10 11:22:30 -07006769static int io_sq_thread(void *data)
6770{
Jens Axboe69fb2132020-09-14 11:16:23 -06006771 struct io_sq_data *sqd = data;
6772 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006773 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006774 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006775 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006776
Pavel Begunkov696ee882021-04-01 09:55:04 +01006777 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006778 set_task_comm(current, buf);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006779 current->pf_io_worker = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006780
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006781 if (sqd->sq_cpu != -1)
6782 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6783 else
6784 set_cpus_allowed_ptr(current, cpu_online_mask);
6785 current->flags |= PF_NO_SETAFFINITY;
6786
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006787 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07006788 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006789 int ret;
6790 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006791
Jens Axboe82734c52021-03-29 06:52:44 -06006792 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6793 signal_pending(current)) {
6794 bool did_sig = false;
6795
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006796 mutex_unlock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006797 if (signal_pending(current)) {
6798 struct ksignal ksig;
6799
6800 did_sig = get_signal(&ksig);
6801 }
Jens Axboe05962f92021-03-06 13:58:48 -07006802 cond_resched();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006803 mutex_lock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006804 if (did_sig)
6805 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006806 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006807 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006808 timeout = jiffies + sqd->sq_thread_idle;
Pavel Begunkov7d41e852021-03-10 13:13:54 +00006809 continue;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006810 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006811 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06006812 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006813 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006814 const struct cred *creds = NULL;
6815
6816 if (ctx->sq_creds != current_cred())
6817 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006818 ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006819 if (creds)
6820 revert_creds(creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006821 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6822 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006823 }
6824
Xiaoguang Wang08369242020-11-03 14:15:59 +08006825 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006826 io_run_task_work();
6827 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006828 if (sqt_spin)
6829 timeout = jiffies + sqd->sq_thread_idle;
6830 continue;
6831 }
6832
Xiaoguang Wang08369242020-11-03 14:15:59 +08006833 needs_sched = true;
6834 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6835 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6836 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6837 !list_empty_careful(&ctx->iopoll_list)) {
6838 needs_sched = false;
6839 break;
6840 }
6841 if (io_sqring_entries(ctx)) {
6842 needs_sched = false;
6843 break;
6844 }
6845 }
6846
Jens Axboe05962f92021-03-06 13:58:48 -07006847 if (needs_sched && !test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
Jens Axboe69fb2132020-09-14 11:16:23 -06006848 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6849 io_ring_set_wakeup_flag(ctx);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006850
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006851 mutex_unlock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006852 schedule();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006853 mutex_lock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006854 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6855 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006856 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006857
6858 finish_wait(&sqd->wait, &wait);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006859 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006860 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006861 }
6862
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006863 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6864 io_uring_cancel_sqpoll(ctx);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006865 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006866 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006867 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006868 mutex_unlock(&sqd->lock);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006869
6870 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006871 io_run_task_work_head(&sqd->park_task_work);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006872 complete(&sqd->exited);
6873 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006874}
6875
Jens Axboebda52162019-09-24 13:47:15 -06006876struct io_wait_queue {
6877 struct wait_queue_entry wq;
6878 struct io_ring_ctx *ctx;
6879 unsigned to_wait;
6880 unsigned nr_timeouts;
6881};
6882
Pavel Begunkov6c503152021-01-04 20:36:36 +00006883static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006884{
6885 struct io_ring_ctx *ctx = iowq->ctx;
6886
6887 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006888 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006889 * started waiting. For timeouts, we always want to return to userspace,
6890 * regardless of event count.
6891 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006892 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006893 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6894}
6895
6896static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6897 int wake_flags, void *key)
6898{
6899 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6900 wq);
6901
Pavel Begunkov6c503152021-01-04 20:36:36 +00006902 /*
6903 * Cannot safely flush overflowed CQEs from here, ensure we wake up
6904 * the task, and the next invocation will do it.
6905 */
6906 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6907 return autoremove_wake_function(curr, mode, wake_flags, key);
6908 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006909}
6910
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006911static int io_run_task_work_sig(void)
6912{
6913 if (io_run_task_work())
6914 return 1;
6915 if (!signal_pending(current))
6916 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06006917 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06006918 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006919 return -EINTR;
6920}
6921
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006922/* when returns >0, the caller should retry */
6923static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6924 struct io_wait_queue *iowq,
6925 signed long *timeout)
6926{
6927 int ret;
6928
6929 /* make sure we run task_work before checking for signals */
6930 ret = io_run_task_work_sig();
6931 if (ret || io_should_wake(iowq))
6932 return ret;
6933 /* let the caller flush overflows, retry */
6934 if (test_bit(0, &ctx->cq_check_overflow))
6935 return 1;
6936
6937 *timeout = schedule_timeout(*timeout);
6938 return !*timeout ? -ETIME : 1;
6939}
6940
Jens Axboe2b188cc2019-01-07 10:46:33 -07006941/*
6942 * Wait until events become available, if we don't already have some. The
6943 * application must reap them itself, as they reside on the shared cq ring.
6944 */
6945static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08006946 const sigset_t __user *sig, size_t sigsz,
6947 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006948{
Jens Axboebda52162019-09-24 13:47:15 -06006949 struct io_wait_queue iowq = {
6950 .wq = {
6951 .private = current,
6952 .func = io_wake_function,
6953 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6954 },
6955 .ctx = ctx,
6956 .to_wait = min_events,
6957 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006958 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006959 signed long timeout = MAX_SCHEDULE_TIMEOUT;
6960 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006961
Jens Axboeb41e9852020-02-17 09:52:41 -07006962 do {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00006963 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00006964 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07006965 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06006966 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07006967 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07006968 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006969
6970 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006971#ifdef CONFIG_COMPAT
6972 if (in_compat_syscall())
6973 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006974 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006975 else
6976#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006977 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006978
Jens Axboe2b188cc2019-01-07 10:46:33 -07006979 if (ret)
6980 return ret;
6981 }
6982
Hao Xuc73ebb62020-11-03 10:54:37 +08006983 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006984 struct timespec64 ts;
6985
Hao Xuc73ebb62020-11-03 10:54:37 +08006986 if (get_timespec64(&ts, uts))
6987 return -EFAULT;
6988 timeout = timespec64_to_jiffies(&ts);
6989 }
6990
Jens Axboebda52162019-09-24 13:47:15 -06006991 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006992 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006993 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07006994 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00006995 if (!io_cqring_overflow_flush(ctx, false)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07006996 ret = -EBUSY;
6997 break;
6998 }
Jens Axboebda52162019-09-24 13:47:15 -06006999 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7000 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007001 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7002 finish_wait(&ctx->wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07007003 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00007004 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06007005
Jens Axboeb7db41c2020-07-04 08:55:50 -06007006 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007007
Hristo Venev75b28af2019-08-26 17:23:46 +00007008 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007009}
7010
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007011static void io_free_file_tables(struct io_file_table *table, unsigned nr_files)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007012{
7013 unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE);
7014
7015 for (i = 0; i < nr_tables; i++)
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007016 kfree(table->files[i]);
7017 kfree(table->files);
7018 table->files = NULL;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007019}
7020
Jens Axboe6b063142019-01-10 22:13:58 -07007021static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7022{
7023#if defined(CONFIG_UNIX)
7024 if (ctx->ring_sock) {
7025 struct sock *sock = ctx->ring_sock->sk;
7026 struct sk_buff *skb;
7027
7028 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7029 kfree_skb(skb);
7030 }
7031#else
7032 int i;
7033
Jens Axboe65e19f52019-10-26 07:20:21 -06007034 for (i = 0; i < ctx->nr_user_files; i++) {
7035 struct file *file;
7036
7037 file = io_file_from_index(ctx, i);
7038 if (file)
7039 fput(file);
7040 }
Jens Axboe6b063142019-01-10 22:13:58 -07007041#endif
Pavel Begunkov08480402021-04-13 02:58:38 +01007042 io_free_file_tables(&ctx->file_table, ctx->nr_user_files);
7043 kfree(ctx->file_data);
7044 ctx->file_data = NULL;
7045 ctx->nr_user_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07007046}
7047
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007048static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00007049{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007050 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00007051}
7052
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007053static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07007054{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007055 spin_unlock_bh(&ctx->rsrc_ref_lock);
7056}
7057
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007058static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
7059{
7060 percpu_ref_exit(&ref_node->refs);
7061 kfree(ref_node);
7062}
7063
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007064static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
7065 struct io_rsrc_data *data_to_kill)
Jens Axboe6b063142019-01-10 22:13:58 -07007066{
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007067 WARN_ON_ONCE(!ctx->rsrc_backup_node);
7068 WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007069
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007070 if (data_to_kill) {
7071 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007072
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007073 rsrc_node->rsrc_data = data_to_kill;
7074 io_rsrc_ref_lock(ctx);
7075 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
7076 io_rsrc_ref_unlock(ctx);
Pavel Begunkov82fbcfa2021-04-01 15:43:43 +01007077
Pavel Begunkov3e942492021-04-11 01:46:34 +01007078 atomic_inc(&data_to_kill->refs);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007079 percpu_ref_kill(&rsrc_node->refs);
7080 ctx->rsrc_node = NULL;
7081 }
7082
7083 if (!ctx->rsrc_node) {
7084 ctx->rsrc_node = ctx->rsrc_backup_node;
7085 ctx->rsrc_backup_node = NULL;
7086 }
Jens Axboe6b063142019-01-10 22:13:58 -07007087}
7088
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007089static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007090{
7091 if (ctx->rsrc_backup_node)
7092 return 0;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007093 ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007094 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
7095}
7096
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007097static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
Hao Xu8bad28d2021-02-19 17:19:36 +08007098{
7099 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007100
Pavel Begunkov215c3902021-04-01 15:43:48 +01007101 /* As we may drop ->uring_lock, other task may have started quiesce */
Hao Xu8bad28d2021-02-19 17:19:36 +08007102 if (data->quiesce)
7103 return -ENXIO;
7104
7105 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007106 do {
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007107 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007108 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007109 break;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007110 io_rsrc_node_switch(ctx, data);
7111
Pavel Begunkov3e942492021-04-11 01:46:34 +01007112 /* kill initial ref, already quiesced if zero */
7113 if (atomic_dec_and_test(&data->refs))
7114 break;
Hao Xu8bad28d2021-02-19 17:19:36 +08007115 flush_delayed_work(&ctx->rsrc_put_work);
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007116 ret = wait_for_completion_interruptible(&data->done);
7117 if (!ret)
7118 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007119
Pavel Begunkov3e942492021-04-11 01:46:34 +01007120 atomic_inc(&data->refs);
7121 /* wait for all works potentially completing data->done */
7122 flush_delayed_work(&ctx->rsrc_put_work);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007123 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007124
Hao Xu8bad28d2021-02-19 17:19:36 +08007125 mutex_unlock(&ctx->uring_lock);
7126 ret = io_run_task_work_sig();
7127 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007128 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007129 data->quiesce = false;
7130
Hao Xu8bad28d2021-02-19 17:19:36 +08007131 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007132}
7133
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007134static struct io_rsrc_data *io_rsrc_data_alloc(struct io_ring_ctx *ctx,
7135 rsrc_put_fn *do_put)
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007136{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007137 struct io_rsrc_data *data;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007138
7139 data = kzalloc(sizeof(*data), GFP_KERNEL);
7140 if (!data)
7141 return NULL;
7142
Pavel Begunkov3e942492021-04-11 01:46:34 +01007143 atomic_set(&data->refs, 1);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007144 data->ctx = ctx;
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007145 data->do_put = do_put;
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007146 init_completion(&data->done);
7147 return data;
7148}
7149
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007150static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7151{
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007152 int ret;
7153
Pavel Begunkov08480402021-04-13 02:58:38 +01007154 if (!ctx->file_data)
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007155 return -ENXIO;
Pavel Begunkov08480402021-04-13 02:58:38 +01007156 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
7157 if (!ret)
7158 __io_sqe_files_unregister(ctx);
7159 return ret;
Jens Axboe6b063142019-01-10 22:13:58 -07007160}
7161
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007162static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007163 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007164{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007165 WARN_ON_ONCE(sqd->thread == current);
7166
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007167 /*
7168 * Do the dance but not conditional clear_bit() because it'd race with
7169 * other threads incrementing park_pending and setting the bit.
7170 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007171 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007172 if (atomic_dec_return(&sqd->park_pending))
7173 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007174 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007175}
7176
Jens Axboe86e0d672021-03-05 08:44:39 -07007177static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007178 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007179{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007180 WARN_ON_ONCE(sqd->thread == current);
7181
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007182 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007183 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007184 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007185 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007186 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007187}
7188
7189static void io_sq_thread_stop(struct io_sq_data *sqd)
7190{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007191 WARN_ON_ONCE(sqd->thread == current);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007192 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007193
Jens Axboe05962f92021-03-06 13:58:48 -07007194 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Pavel Begunkov88885f62021-04-11 01:46:38 +01007195 mutex_lock(&sqd->lock);
Jens Axboee8f98f242021-03-09 16:32:13 -07007196 if (sqd->thread)
7197 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007198 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007199 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007200}
7201
Jens Axboe534ca6d2020-09-02 13:52:19 -06007202static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007203{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007204 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007205 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7206
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007207 io_sq_thread_stop(sqd);
7208 kfree(sqd);
7209 }
7210}
7211
7212static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7213{
7214 struct io_sq_data *sqd = ctx->sq_data;
7215
7216 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007217 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007218 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007219 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007220 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007221
7222 io_put_sq_data(sqd);
7223 ctx->sq_data = NULL;
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007224 if (ctx->sq_creds)
7225 put_cred(ctx->sq_creds);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007226 }
7227}
7228
Jens Axboeaa061652020-09-02 14:50:27 -06007229static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7230{
7231 struct io_ring_ctx *ctx_attach;
7232 struct io_sq_data *sqd;
7233 struct fd f;
7234
7235 f = fdget(p->wq_fd);
7236 if (!f.file)
7237 return ERR_PTR(-ENXIO);
7238 if (f.file->f_op != &io_uring_fops) {
7239 fdput(f);
7240 return ERR_PTR(-EINVAL);
7241 }
7242
7243 ctx_attach = f.file->private_data;
7244 sqd = ctx_attach->sq_data;
7245 if (!sqd) {
7246 fdput(f);
7247 return ERR_PTR(-EINVAL);
7248 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007249 if (sqd->task_tgid != current->tgid) {
7250 fdput(f);
7251 return ERR_PTR(-EPERM);
7252 }
Jens Axboeaa061652020-09-02 14:50:27 -06007253
7254 refcount_inc(&sqd->refs);
7255 fdput(f);
7256 return sqd;
7257}
7258
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007259static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7260 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007261{
7262 struct io_sq_data *sqd;
7263
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007264 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007265 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7266 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007267 if (!IS_ERR(sqd)) {
7268 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007269 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007270 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007271 /* fall through for EPERM case, setup new sqd/task */
7272 if (PTR_ERR(sqd) != -EPERM)
7273 return sqd;
7274 }
Jens Axboeaa061652020-09-02 14:50:27 -06007275
Jens Axboe534ca6d2020-09-02 13:52:19 -06007276 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7277 if (!sqd)
7278 return ERR_PTR(-ENOMEM);
7279
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007280 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007281 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007282 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007283 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007284 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007285 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007286 return sqd;
7287}
7288
Jens Axboe6b063142019-01-10 22:13:58 -07007289#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007290/*
7291 * Ensure the UNIX gc is aware of our file set, so we are certain that
7292 * the io_uring can be safely unregistered on process exit, even if we have
7293 * loops in the file referencing.
7294 */
7295static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7296{
7297 struct sock *sk = ctx->ring_sock->sk;
7298 struct scm_fp_list *fpl;
7299 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007300 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007301
Jens Axboe6b063142019-01-10 22:13:58 -07007302 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7303 if (!fpl)
7304 return -ENOMEM;
7305
7306 skb = alloc_skb(0, GFP_KERNEL);
7307 if (!skb) {
7308 kfree(fpl);
7309 return -ENOMEM;
7310 }
7311
7312 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007313
Jens Axboe08a45172019-10-03 08:11:03 -06007314 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007315 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007316 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007317 struct file *file = io_file_from_index(ctx, i + offset);
7318
7319 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007320 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007321 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007322 unix_inflight(fpl->user, fpl->fp[nr_files]);
7323 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007324 }
7325
Jens Axboe08a45172019-10-03 08:11:03 -06007326 if (nr_files) {
7327 fpl->max = SCM_MAX_FD;
7328 fpl->count = nr_files;
7329 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007330 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007331 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7332 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007333
Jens Axboe08a45172019-10-03 08:11:03 -06007334 for (i = 0; i < nr_files; i++)
7335 fput(fpl->fp[i]);
7336 } else {
7337 kfree_skb(skb);
7338 kfree(fpl);
7339 }
Jens Axboe6b063142019-01-10 22:13:58 -07007340
7341 return 0;
7342}
7343
7344/*
7345 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7346 * causes regular reference counting to break down. We rely on the UNIX
7347 * garbage collection to take care of this problem for us.
7348 */
7349static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7350{
7351 unsigned left, total;
7352 int ret = 0;
7353
7354 total = 0;
7355 left = ctx->nr_user_files;
7356 while (left) {
7357 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007358
7359 ret = __io_sqe_files_scm(ctx, this_files, total);
7360 if (ret)
7361 break;
7362 left -= this_files;
7363 total += this_files;
7364 }
7365
7366 if (!ret)
7367 return 0;
7368
7369 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007370 struct file *file = io_file_from_index(ctx, total);
7371
7372 if (file)
7373 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007374 total++;
7375 }
7376
7377 return ret;
7378}
7379#else
7380static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7381{
7382 return 0;
7383}
7384#endif
7385
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007386static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007387{
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007388 unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE);
7389
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007390 table->files = kcalloc(nr_tables, sizeof(*table->files), GFP_KERNEL);
7391 if (!table->files)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007392 return false;
Jens Axboe65e19f52019-10-26 07:20:21 -06007393
7394 for (i = 0; i < nr_tables; i++) {
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007395 unsigned int this_files = min(nr_files, IORING_MAX_FILES_TABLE);
Jens Axboe65e19f52019-10-26 07:20:21 -06007396
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007397 table->files[i] = kcalloc(this_files, sizeof(*table->files[i]),
Jens Axboe65e19f52019-10-26 07:20:21 -06007398 GFP_KERNEL);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007399 if (!table->files[i])
Jens Axboe65e19f52019-10-26 07:20:21 -06007400 break;
7401 nr_files -= this_files;
7402 }
7403
7404 if (i == nr_tables)
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007405 return true;
Jens Axboe65e19f52019-10-26 07:20:21 -06007406
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007407 io_free_file_tables(table, nr_tables * IORING_MAX_FILES_TABLE);
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007408 return false;
Jens Axboe65e19f52019-10-26 07:20:21 -06007409}
7410
Pavel Begunkov47e90392021-04-01 15:43:56 +01007411static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007412{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007413 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007414#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007415 struct sock *sock = ctx->ring_sock->sk;
7416 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7417 struct sk_buff *skb;
7418 int i;
7419
7420 __skb_queue_head_init(&list);
7421
7422 /*
7423 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7424 * remove this entry and rearrange the file array.
7425 */
7426 skb = skb_dequeue(head);
7427 while (skb) {
7428 struct scm_fp_list *fp;
7429
7430 fp = UNIXCB(skb).fp;
7431 for (i = 0; i < fp->count; i++) {
7432 int left;
7433
7434 if (fp->fp[i] != file)
7435 continue;
7436
7437 unix_notinflight(fp->user, fp->fp[i]);
7438 left = fp->count - 1 - i;
7439 if (left) {
7440 memmove(&fp->fp[i], &fp->fp[i + 1],
7441 left * sizeof(struct file *));
7442 }
7443 fp->count--;
7444 if (!fp->count) {
7445 kfree_skb(skb);
7446 skb = NULL;
7447 } else {
7448 __skb_queue_tail(&list, skb);
7449 }
7450 fput(file);
7451 file = NULL;
7452 break;
7453 }
7454
7455 if (!file)
7456 break;
7457
7458 __skb_queue_tail(&list, skb);
7459
7460 skb = skb_dequeue(head);
7461 }
7462
7463 if (skb_peek(&list)) {
7464 spin_lock_irq(&head->lock);
7465 while ((skb = __skb_dequeue(&list)) != NULL)
7466 __skb_queue_tail(head, skb);
7467 spin_unlock_irq(&head->lock);
7468 }
7469#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007470 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007471#endif
7472}
7473
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007474static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007475{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007476 struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007477 struct io_ring_ctx *ctx = rsrc_data->ctx;
7478 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007479
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007480 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7481 list_del(&prsrc->list);
Pavel Begunkov40ae0ff2021-04-01 15:43:44 +01007482 rsrc_data->do_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007483 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007484 }
7485
Pavel Begunkov28a9fe22021-04-01 15:43:47 +01007486 io_rsrc_node_destroy(ref_node);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007487 if (atomic_dec_and_test(&rsrc_data->refs))
7488 complete(&rsrc_data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007489}
7490
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007491static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007492{
7493 struct io_ring_ctx *ctx;
7494 struct llist_node *node;
7495
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007496 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7497 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007498
7499 while (node) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007500 struct io_rsrc_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007501 struct llist_node *next = node->next;
7502
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007503 ref_node = llist_entry(node, struct io_rsrc_node, llist);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007504 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007505 node = next;
7506 }
7507}
7508
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007509static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007510{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007511 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
Pavel Begunkov3e942492021-04-11 01:46:34 +01007512 struct io_ring_ctx *ctx = node->rsrc_data->ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007513 bool first_add = false;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007514
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007515 io_rsrc_ref_lock(ctx);
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007516 node->done = true;
Pavel Begunkove2978222020-11-18 14:56:26 +00007517
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007518 while (!list_empty(&ctx->rsrc_ref_list)) {
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007519 node = list_first_entry(&ctx->rsrc_ref_list,
7520 struct io_rsrc_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007521 /* recycle ref nodes in order */
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007522 if (!node->done)
Pavel Begunkove2978222020-11-18 14:56:26 +00007523 break;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007524 list_del(&node->node);
7525 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007526 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007527 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007528
Pavel Begunkov3e942492021-04-11 01:46:34 +01007529 if (first_add)
7530 mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007531}
7532
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007533static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007534{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007535 struct io_rsrc_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007536
7537 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7538 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007539 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007540
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007541 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007542 0, GFP_KERNEL)) {
7543 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007544 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007545 }
7546 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007547 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007548 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007549 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007550}
7551
Jens Axboe05f3fb32019-12-09 11:22:50 -07007552static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7553 unsigned nr_args)
7554{
7555 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007556 struct file *file;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007557 int fd, ret;
Pavel Begunkov846a4ef2021-04-01 15:44:03 +01007558 unsigned i;
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007559 struct io_rsrc_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007560
7561 if (ctx->file_data)
7562 return -EBUSY;
7563 if (!nr_args)
7564 return -EINVAL;
7565 if (nr_args > IORING_MAX_FIXED_FILES)
7566 return -EMFILE;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007567 ret = io_rsrc_node_switch_start(ctx);
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007568 if (ret)
7569 return ret;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007570
Pavel Begunkov47e90392021-04-01 15:43:56 +01007571 file_data = io_rsrc_data_alloc(ctx, io_rsrc_file_put);
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007572 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007573 return -ENOMEM;
Dan Carpenter13770a72021-02-01 15:23:42 +03007574 ctx->file_data = file_data;
Pavel Begunkovf3baed32021-04-01 15:43:42 +01007575 ret = -ENOMEM;
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007576 if (!io_alloc_file_tables(&ctx->file_table, nr_args))
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007577 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007578
Jens Axboe05f3fb32019-12-09 11:22:50 -07007579 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007580 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7581 ret = -EFAULT;
7582 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007583 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007584 /* allow sparse sets */
7585 if (fd == -1)
7586 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007587
Jens Axboe05f3fb32019-12-09 11:22:50 -07007588 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007589 ret = -EBADF;
7590 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007591 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007592
7593 /*
7594 * Don't allow io_uring instances to be registered. If UNIX
7595 * isn't enabled, then this causes a reference cycle and this
7596 * instance can never get freed. If UNIX is enabled we'll
7597 * handle it just fine, but there's still no point in allowing
7598 * a ring fd as it doesn't support regular read/write anyway.
7599 */
7600 if (file->f_op == &io_uring_fops) {
7601 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007602 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007603 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007604 io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007605 }
7606
Jens Axboe05f3fb32019-12-09 11:22:50 -07007607 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007608 if (ret) {
Pavel Begunkov08480402021-04-13 02:58:38 +01007609 __io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007610 return ret;
7611 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007612
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007613 io_rsrc_node_switch(ctx, NULL);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007614 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007615out_fput:
7616 for (i = 0; i < ctx->nr_user_files; i++) {
7617 file = io_file_from_index(ctx, i);
7618 if (file)
7619 fput(file);
7620 }
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007621 io_free_file_tables(&ctx->file_table, nr_args);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007622 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007623out_free:
Pavel Begunkov3e942492021-04-11 01:46:34 +01007624 kfree(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007625 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007626 return ret;
7627}
7628
Jens Axboec3a31e62019-10-03 13:59:56 -06007629static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7630 int index)
7631{
7632#if defined(CONFIG_UNIX)
7633 struct sock *sock = ctx->ring_sock->sk;
7634 struct sk_buff_head *head = &sock->sk_receive_queue;
7635 struct sk_buff *skb;
7636
7637 /*
7638 * See if we can merge this file into an existing skb SCM_RIGHTS
7639 * file set. If there's no room, fall back to allocating a new skb
7640 * and filling it in.
7641 */
7642 spin_lock_irq(&head->lock);
7643 skb = skb_peek(head);
7644 if (skb) {
7645 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7646
7647 if (fpl->count < SCM_MAX_FD) {
7648 __skb_unlink(skb, head);
7649 spin_unlock_irq(&head->lock);
7650 fpl->fp[fpl->count] = get_file(file);
7651 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7652 fpl->count++;
7653 spin_lock_irq(&head->lock);
7654 __skb_queue_head(head, skb);
7655 } else {
7656 skb = NULL;
7657 }
7658 }
7659 spin_unlock_irq(&head->lock);
7660
7661 if (skb) {
7662 fput(file);
7663 return 0;
7664 }
7665
7666 return __io_sqe_files_scm(ctx, 1, index);
7667#else
7668 return 0;
7669#endif
7670}
7671
Pavel Begunkove7c78372021-04-01 15:43:45 +01007672static int io_queue_rsrc_removal(struct io_rsrc_data *data,
7673 struct io_rsrc_node *node, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007674{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007675 struct io_rsrc_put *prsrc;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007676
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007677 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7678 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007679 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007680
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007681 prsrc->rsrc = rsrc;
Pavel Begunkove7c78372021-04-01 15:43:45 +01007682 list_add(&prsrc->list, &node->rsrc_list);
Hillf Dantona5318d32020-03-23 17:47:15 +08007683 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007684}
7685
7686static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007687 struct io_uring_rsrc_update *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007688 unsigned nr_args)
7689{
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01007690 struct io_rsrc_data *data = ctx->file_data;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007691 struct io_fixed_file *file_slot;
7692 struct file *file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007693 __s32 __user *fds;
7694 int fd, i, err;
7695 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007696 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007697
Jens Axboe05f3fb32019-12-09 11:22:50 -07007698 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007699 return -EOVERFLOW;
7700 if (done > ctx->nr_user_files)
7701 return -EINVAL;
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007702 err = io_rsrc_node_switch_start(ctx);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007703 if (err)
7704 return err;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007705
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007706 fds = u64_to_user_ptr(up->data);
Pavel Begunkov67973b92021-01-26 13:51:09 +00007707 for (done = 0; done < nr_args; done++) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007708 err = 0;
7709 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7710 err = -EFAULT;
7711 break;
7712 }
noah4e0377a2021-01-26 15:23:28 -05007713 if (fd == IORING_REGISTER_FILES_SKIP)
7714 continue;
7715
Pavel Begunkov67973b92021-01-26 13:51:09 +00007716 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovaeca2412021-04-11 01:46:37 +01007717 file_slot = io_fixed_file_slot(&ctx->file_table, i);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007718
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007719 if (file_slot->file_ptr) {
7720 file = (struct file *)(file_slot->file_ptr & FFS_MASK);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007721 err = io_queue_rsrc_removal(data, ctx->rsrc_node, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08007722 if (err)
7723 break;
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007724 file_slot->file_ptr = 0;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007725 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007726 }
7727 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007728 file = fget(fd);
7729 if (!file) {
7730 err = -EBADF;
7731 break;
7732 }
7733 /*
7734 * Don't allow io_uring instances to be registered. If
7735 * UNIX isn't enabled, then this causes a reference
7736 * cycle and this instance can never get freed. If UNIX
7737 * is enabled we'll handle it just fine, but there's
7738 * still no point in allowing a ring fd as it doesn't
7739 * support regular read/write anyway.
7740 */
7741 if (file->f_op == &io_uring_fops) {
7742 fput(file);
7743 err = -EBADF;
7744 break;
7745 }
Pavel Begunkov9a321c92021-04-01 15:44:01 +01007746 io_fixed_file_set(file_slot, file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007747 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007748 if (err) {
Pavel Begunkova04b0ac2021-04-01 15:44:04 +01007749 file_slot->file_ptr = 0;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007750 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007751 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007752 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007753 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007754 }
7755
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01007756 if (needs_switch)
7757 io_rsrc_node_switch(ctx, data);
Jens Axboec3a31e62019-10-03 13:59:56 -06007758 return done ? done : err;
7759}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007760
Jens Axboe05f3fb32019-12-09 11:22:50 -07007761static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7762 unsigned nr_args)
7763{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007764 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007765
7766 if (!ctx->file_data)
7767 return -ENXIO;
7768 if (!nr_args)
7769 return -EINVAL;
7770 if (copy_from_user(&up, arg, sizeof(up)))
7771 return -EFAULT;
7772 if (up.resv)
7773 return -EINVAL;
7774
7775 return __io_sqe_files_update(ctx, &up, nr_args);
7776}
Jens Axboec3a31e62019-10-03 13:59:56 -06007777
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007778static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007779{
7780 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7781
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007782 req = io_put_req_find_next(req);
7783 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07007784}
7785
Jens Axboe685fe7f2021-03-08 09:37:51 -07007786static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
7787 struct task_struct *task)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007788{
Jens Axboee9418942021-02-19 12:33:30 -07007789 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007790 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007791 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007792
Jens Axboee9418942021-02-19 12:33:30 -07007793 hash = ctx->hash_map;
7794 if (!hash) {
7795 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7796 if (!hash)
7797 return ERR_PTR(-ENOMEM);
7798 refcount_set(&hash->refs, 1);
7799 init_waitqueue_head(&hash->wait);
7800 ctx->hash_map = hash;
7801 }
7802
7803 data.hash = hash;
Jens Axboe685fe7f2021-03-08 09:37:51 -07007804 data.task = task;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007805 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007806 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007807
Jens Axboed25e3a32021-02-16 11:41:41 -07007808 /* Do QD, or 4 * CPUS, whatever is smallest */
7809 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007810
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007811 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007812}
7813
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007814static int io_uring_alloc_task_context(struct task_struct *task,
7815 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007816{
7817 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007818 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007819
7820 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7821 if (unlikely(!tctx))
7822 return -ENOMEM;
7823
Jens Axboed8a6df12020-10-15 16:24:45 -06007824 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7825 if (unlikely(ret)) {
7826 kfree(tctx);
7827 return ret;
7828 }
7829
Jens Axboe685fe7f2021-03-08 09:37:51 -07007830 tctx->io_wq = io_init_wq_offload(ctx, task);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007831 if (IS_ERR(tctx->io_wq)) {
7832 ret = PTR_ERR(tctx->io_wq);
7833 percpu_counter_destroy(&tctx->inflight);
7834 kfree(tctx);
7835 return ret;
7836 }
7837
Jens Axboe0f212202020-09-13 13:09:39 -06007838 xa_init(&tctx->xa);
7839 init_waitqueue_head(&tctx->wait);
7840 tctx->last = NULL;
Jens Axboefdaf0832020-10-30 09:37:30 -06007841 atomic_set(&tctx->in_idle, 0);
Pavel Begunkovb303fe22021-04-11 01:46:26 +01007842 atomic_set(&tctx->inflight_tracked, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007843 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00007844 spin_lock_init(&tctx->task_lock);
7845 INIT_WQ_LIST(&tctx->task_list);
7846 tctx->task_state = 0;
7847 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06007848 return 0;
7849}
7850
7851void __io_uring_free(struct task_struct *tsk)
7852{
7853 struct io_uring_task *tctx = tsk->io_uring;
7854
7855 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007856 WARN_ON_ONCE(tctx->io_wq);
7857
Jens Axboed8a6df12020-10-15 16:24:45 -06007858 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007859 kfree(tctx);
7860 tsk->io_uring = NULL;
7861}
7862
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007863static int io_sq_offload_create(struct io_ring_ctx *ctx,
7864 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007865{
7866 int ret;
7867
Jens Axboed25e3a32021-02-16 11:41:41 -07007868 /* Retain compatibility with failing for an invalid attach attempt */
7869 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7870 IORING_SETUP_ATTACH_WQ) {
7871 struct fd f;
7872
7873 f = fdget(p->wq_fd);
7874 if (!f.file)
7875 return -ENXIO;
7876 if (f.file->f_op != &io_uring_fops) {
7877 fdput(f);
7878 return -EINVAL;
7879 }
7880 fdput(f);
7881 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007882 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07007883 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007884 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007885 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007886
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007887 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007888 if (IS_ERR(sqd)) {
7889 ret = PTR_ERR(sqd);
7890 goto err;
7891 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007892
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007893 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06007894 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06007895 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7896 if (!ctx->sq_thread_idle)
7897 ctx->sq_thread_idle = HZ;
7898
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007899 ret = 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007900 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007901 list_add(&ctx->sqd_list, &sqd->ctx_list);
7902 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007903 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007904 if (attached && !sqd->thread)
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007905 ret = -ENXIO;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007906 io_sq_thread_unpark(sqd);
7907
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007908 if (ret < 0)
7909 goto err;
7910 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007911 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06007912
Jens Axboe6c271ce2019-01-10 11:22:30 -07007913 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06007914 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007915
Jens Axboe917257d2019-04-13 09:28:55 -06007916 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06007917 if (cpu >= nr_cpu_ids)
Jens Axboee8f98f242021-03-09 16:32:13 -07007918 goto err_sqpoll;
Shenghui Wang7889f442019-05-07 16:03:19 +08007919 if (!cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07007920 goto err_sqpoll;
Jens Axboe917257d2019-04-13 09:28:55 -06007921
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007922 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007923 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007924 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007925 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007926
7927 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007928 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07007929 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
7930 if (IS_ERR(tsk)) {
7931 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07007932 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007933 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007934
Jens Axboe46fe18b2021-03-04 12:39:36 -07007935 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007936 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07007937 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06007938 if (ret)
7939 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007940 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7941 /* Can't have SQ_AFF without SQPOLL */
7942 ret = -EINVAL;
7943 goto err;
7944 }
7945
Jens Axboe2b188cc2019-01-07 10:46:33 -07007946 return 0;
7947err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007948 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007949 return ret;
Jens Axboee8f98f242021-03-09 16:32:13 -07007950err_sqpoll:
7951 complete(&ctx->sq_data->exited);
7952 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007953}
7954
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007955static inline void __io_unaccount_mem(struct user_struct *user,
7956 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007957{
7958 atomic_long_sub(nr_pages, &user->locked_vm);
7959}
7960
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007961static inline int __io_account_mem(struct user_struct *user,
7962 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007963{
7964 unsigned long page_limit, cur_pages, new_pages;
7965
7966 /* Don't allow more pages than we can safely lock */
7967 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7968
7969 do {
7970 cur_pages = atomic_long_read(&user->locked_vm);
7971 new_pages = cur_pages + nr_pages;
7972 if (new_pages > page_limit)
7973 return -ENOMEM;
7974 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7975 new_pages) != cur_pages);
7976
7977 return 0;
7978}
7979
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007980static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007981{
Jens Axboe62e398b2021-02-21 16:19:37 -07007982 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007983 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007984
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007985 if (ctx->mm_account)
7986 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007987}
7988
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007989static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007990{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007991 int ret;
7992
Jens Axboe62e398b2021-02-21 16:19:37 -07007993 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007994 ret = __io_account_mem(ctx->user, nr_pages);
7995 if (ret)
7996 return ret;
7997 }
7998
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007999 if (ctx->mm_account)
8000 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008001
8002 return 0;
8003}
8004
Jens Axboe2b188cc2019-01-07 10:46:33 -07008005static void io_mem_free(void *ptr)
8006{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008007 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008008
Mark Rutland52e04ef2019-04-30 17:30:21 +01008009 if (!ptr)
8010 return;
8011
8012 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008013 if (put_page_testzero(page))
8014 free_compound_page(page);
8015}
8016
8017static void *io_mem_alloc(size_t size)
8018{
8019 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008020 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008021
8022 return (void *) __get_free_pages(gfp_flags, get_order(size));
8023}
8024
Hristo Venev75b28af2019-08-26 17:23:46 +00008025static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8026 size_t *sq_offset)
8027{
8028 struct io_rings *rings;
8029 size_t off, sq_array_size;
8030
8031 off = struct_size(rings, cqes, cq_entries);
8032 if (off == SIZE_MAX)
8033 return SIZE_MAX;
8034
8035#ifdef CONFIG_SMP
8036 off = ALIGN(off, SMP_CACHE_BYTES);
8037 if (off == 0)
8038 return SIZE_MAX;
8039#endif
8040
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008041 if (sq_offset)
8042 *sq_offset = off;
8043
Hristo Venev75b28af2019-08-26 17:23:46 +00008044 sq_array_size = array_size(sizeof(u32), sq_entries);
8045 if (sq_array_size == SIZE_MAX)
8046 return SIZE_MAX;
8047
8048 if (check_add_overflow(off, sq_array_size, &off))
8049 return SIZE_MAX;
8050
Hristo Venev75b28af2019-08-26 17:23:46 +00008051 return off;
8052}
8053
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008054static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
8055{
8056 unsigned int i;
8057
8058 for (i = 0; i < imu->nr_bvecs; i++)
8059 unpin_user_page(imu->bvec[i].bv_page);
8060 if (imu->acct_pages)
8061 io_unaccount_mem(ctx, imu->acct_pages);
8062 kvfree(imu->bvec);
8063 imu->nr_bvecs = 0;
8064}
8065
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008066static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008067{
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008068 unsigned int i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008069
8070 if (!ctx->user_bufs)
8071 return -ENXIO;
8072
Pavel Begunkov7f61a1e2021-04-11 01:46:35 +01008073 for (i = 0; i < ctx->nr_user_bufs; i++)
8074 io_buffer_unmap(ctx, &ctx->user_bufs[i]);
Jens Axboeedafcce2019-01-09 09:16:05 -07008075 kfree(ctx->user_bufs);
8076 ctx->user_bufs = NULL;
8077 ctx->nr_user_bufs = 0;
8078 return 0;
8079}
8080
8081static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8082 void __user *arg, unsigned index)
8083{
8084 struct iovec __user *src;
8085
8086#ifdef CONFIG_COMPAT
8087 if (ctx->compat) {
8088 struct compat_iovec __user *ciovs;
8089 struct compat_iovec ciov;
8090
8091 ciovs = (struct compat_iovec __user *) arg;
8092 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8093 return -EFAULT;
8094
Jens Axboed55e5f52019-12-11 16:12:15 -07008095 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008096 dst->iov_len = ciov.iov_len;
8097 return 0;
8098 }
8099#endif
8100 src = (struct iovec __user *) arg;
8101 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8102 return -EFAULT;
8103 return 0;
8104}
8105
Jens Axboede293932020-09-17 16:19:16 -06008106/*
8107 * Not super efficient, but this is just a registration time. And we do cache
8108 * the last compound head, so generally we'll only do a full search if we don't
8109 * match that one.
8110 *
8111 * We check if the given compound head page has already been accounted, to
8112 * avoid double accounting it. This allows us to account the full size of the
8113 * page, not just the constituent pages of a huge page.
8114 */
8115static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8116 int nr_pages, struct page *hpage)
8117{
8118 int i, j;
8119
8120 /* check current page array */
8121 for (i = 0; i < nr_pages; i++) {
8122 if (!PageCompound(pages[i]))
8123 continue;
8124 if (compound_head(pages[i]) == hpage)
8125 return true;
8126 }
8127
8128 /* check previously registered pages */
8129 for (i = 0; i < ctx->nr_user_bufs; i++) {
8130 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8131
8132 for (j = 0; j < imu->nr_bvecs; j++) {
8133 if (!PageCompound(imu->bvec[j].bv_page))
8134 continue;
8135 if (compound_head(imu->bvec[j].bv_page) == hpage)
8136 return true;
8137 }
8138 }
8139
8140 return false;
8141}
8142
8143static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8144 int nr_pages, struct io_mapped_ubuf *imu,
8145 struct page **last_hpage)
8146{
8147 int i, ret;
8148
8149 for (i = 0; i < nr_pages; i++) {
8150 if (!PageCompound(pages[i])) {
8151 imu->acct_pages++;
8152 } else {
8153 struct page *hpage;
8154
8155 hpage = compound_head(pages[i]);
8156 if (hpage == *last_hpage)
8157 continue;
8158 *last_hpage = hpage;
8159 if (headpage_already_acct(ctx, pages, i, hpage))
8160 continue;
8161 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8162 }
8163 }
8164
8165 if (!imu->acct_pages)
8166 return 0;
8167
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008168 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008169 if (ret)
8170 imu->acct_pages = 0;
8171 return ret;
8172}
8173
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008174static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8175 struct io_mapped_ubuf *imu,
8176 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008177{
8178 struct vm_area_struct **vmas = NULL;
8179 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008180 unsigned long off, start, end, ubuf;
8181 size_t size;
8182 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008183
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008184 ubuf = (unsigned long) iov->iov_base;
8185 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8186 start = ubuf >> PAGE_SHIFT;
8187 nr_pages = end - start;
8188
8189 ret = -ENOMEM;
8190
8191 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8192 if (!pages)
8193 goto done;
8194
8195 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8196 GFP_KERNEL);
8197 if (!vmas)
8198 goto done;
8199
8200 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8201 GFP_KERNEL);
8202 if (!imu->bvec)
8203 goto done;
8204
8205 ret = 0;
8206 mmap_read_lock(current->mm);
8207 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8208 pages, vmas);
8209 if (pret == nr_pages) {
8210 /* don't support file backed memory */
8211 for (i = 0; i < nr_pages; i++) {
8212 struct vm_area_struct *vma = vmas[i];
8213
8214 if (vma->vm_file &&
8215 !is_file_hugepages(vma->vm_file)) {
8216 ret = -EOPNOTSUPP;
8217 break;
8218 }
8219 }
8220 } else {
8221 ret = pret < 0 ? pret : -EFAULT;
8222 }
8223 mmap_read_unlock(current->mm);
8224 if (ret) {
8225 /*
8226 * if we did partial map, or found file backed vmas,
8227 * release any pages we did get
8228 */
8229 if (pret > 0)
8230 unpin_user_pages(pages, pret);
8231 kvfree(imu->bvec);
8232 goto done;
8233 }
8234
8235 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8236 if (ret) {
8237 unpin_user_pages(pages, pret);
8238 kvfree(imu->bvec);
8239 goto done;
8240 }
8241
8242 off = ubuf & ~PAGE_MASK;
8243 size = iov->iov_len;
8244 for (i = 0; i < nr_pages; i++) {
8245 size_t vec_len;
8246
8247 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8248 imu->bvec[i].bv_page = pages[i];
8249 imu->bvec[i].bv_len = vec_len;
8250 imu->bvec[i].bv_offset = off;
8251 off = 0;
8252 size -= vec_len;
8253 }
8254 /* store original address for later verification */
8255 imu->ubuf = ubuf;
Pavel Begunkov4751f532021-04-01 15:43:55 +01008256 imu->ubuf_end = ubuf + iov->iov_len;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008257 imu->nr_bvecs = nr_pages;
8258 ret = 0;
8259done:
8260 kvfree(pages);
8261 kvfree(vmas);
8262 return ret;
8263}
8264
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008265static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008266{
Pavel Begunkov87094462021-04-11 01:46:36 +01008267 ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
8268 return ctx->user_bufs ? 0 : -ENOMEM;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008269}
8270
8271static int io_buffer_validate(struct iovec *iov)
8272{
Pavel Begunkov50e96982021-03-24 22:59:01 +00008273 unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
8274
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008275 /*
8276 * Don't impose further limits on the size and buffer
8277 * constraints here, we'll -EINVAL later when IO is
8278 * submitted if they are wrong.
8279 */
8280 if (!iov->iov_base || !iov->iov_len)
8281 return -EFAULT;
8282
8283 /* arbitrary limit, but we need something */
8284 if (iov->iov_len > SZ_1G)
8285 return -EFAULT;
8286
Pavel Begunkov50e96982021-03-24 22:59:01 +00008287 if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
8288 return -EOVERFLOW;
8289
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008290 return 0;
8291}
8292
8293static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8294 unsigned int nr_args)
8295{
8296 int i, ret;
8297 struct iovec iov;
8298 struct page *last_hpage = NULL;
8299
Pavel Begunkov87094462021-04-11 01:46:36 +01008300 if (ctx->user_bufs)
8301 return -EBUSY;
8302 if (!nr_args || nr_args > UIO_MAXIOV)
8303 return -EINVAL;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008304 ret = io_buffers_map_alloc(ctx, nr_args);
8305 if (ret)
8306 return ret;
8307
Pavel Begunkov87094462021-04-11 01:46:36 +01008308 for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
Jens Axboeedafcce2019-01-09 09:16:05 -07008309 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
Jens Axboeedafcce2019-01-09 09:16:05 -07008310
8311 ret = io_copy_iov(ctx, &iov, arg, i);
8312 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008313 break;
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008314 ret = io_buffer_validate(&iov);
8315 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008316 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008317 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8318 if (ret)
8319 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008320 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008321
8322 if (ret)
8323 io_sqe_buffers_unregister(ctx);
8324
Jens Axboeedafcce2019-01-09 09:16:05 -07008325 return ret;
8326}
8327
Jens Axboe9b402842019-04-11 11:45:41 -06008328static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8329{
8330 __s32 __user *fds = arg;
8331 int fd;
8332
8333 if (ctx->cq_ev_fd)
8334 return -EBUSY;
8335
8336 if (copy_from_user(&fd, fds, sizeof(*fds)))
8337 return -EFAULT;
8338
8339 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8340 if (IS_ERR(ctx->cq_ev_fd)) {
8341 int ret = PTR_ERR(ctx->cq_ev_fd);
8342 ctx->cq_ev_fd = NULL;
8343 return ret;
8344 }
8345
8346 return 0;
8347}
8348
8349static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8350{
8351 if (ctx->cq_ev_fd) {
8352 eventfd_ctx_put(ctx->cq_ev_fd);
8353 ctx->cq_ev_fd = NULL;
8354 return 0;
8355 }
8356
8357 return -ENXIO;
8358}
8359
Jens Axboe5a2e7452020-02-23 16:23:11 -07008360static void io_destroy_buffers(struct io_ring_ctx *ctx)
8361{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008362 struct io_buffer *buf;
8363 unsigned long index;
8364
8365 xa_for_each(&ctx->io_buffers, index, buf)
8366 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008367}
8368
Jens Axboe68e68ee2021-02-13 09:00:02 -07008369static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008370{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008371 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008372
Jens Axboe68e68ee2021-02-13 09:00:02 -07008373 list_for_each_entry_safe(req, nxt, list, compl.list) {
8374 if (tsk && req->task != tsk)
8375 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008376 list_del(&req->compl.list);
8377 kmem_cache_free(req_cachep, req);
8378 }
8379}
8380
Jens Axboe4010fec2021-02-27 15:04:18 -07008381static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008382{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008383 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008384 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008385
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008386 mutex_lock(&ctx->uring_lock);
8387
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008388 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008389 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8390 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008391 submit_state->free_reqs = 0;
8392 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008393
Pavel Begunkovdac7a092021-03-19 17:22:39 +00008394 io_flush_cached_locked_reqs(ctx, cs);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008395 io_req_cache_free(&cs->free_list, NULL);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008396 mutex_unlock(&ctx->uring_lock);
8397}
8398
Jens Axboe2b188cc2019-01-07 10:46:33 -07008399static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8400{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008401 io_sq_thread_finish(ctx);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008402 io_sqe_buffers_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008403
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008404 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008405 mmdrop(ctx->mm_account);
8406 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008407 }
Jens Axboedef596e2019-01-09 08:59:42 -07008408
Hao Xu8bad28d2021-02-19 17:19:36 +08008409 mutex_lock(&ctx->uring_lock);
Pavel Begunkov08480402021-04-13 02:58:38 +01008410 if (ctx->file_data) {
8411 if (!atomic_dec_and_test(&ctx->file_data->refs))
8412 wait_for_completion(&ctx->file_data->done);
8413 __io_sqe_files_unregister(ctx);
8414 }
Pavel Begunkovc4ea0602021-04-01 15:43:58 +01008415 if (ctx->rings)
8416 __io_cqring_overflow_flush(ctx, true);
Hao Xu8bad28d2021-02-19 17:19:36 +08008417 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008418 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008419 io_destroy_buffers(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07008420
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008421 /* there are no registered resources left, nobody uses it */
8422 if (ctx->rsrc_node)
8423 io_rsrc_node_destroy(ctx->rsrc_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008424 if (ctx->rsrc_backup_node)
Pavel Begunkovb895c9a2021-04-01 15:43:40 +01008425 io_rsrc_node_destroy(ctx->rsrc_backup_node);
Pavel Begunkova7f0ed52021-04-01 15:43:46 +01008426 flush_delayed_work(&ctx->rsrc_put_work);
8427
8428 WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
8429 WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008430
Jens Axboe2b188cc2019-01-07 10:46:33 -07008431#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008432 if (ctx->ring_sock) {
8433 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008434 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008435 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008436#endif
8437
Hristo Venev75b28af2019-08-26 17:23:46 +00008438 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008439 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008440
8441 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008442 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008443 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008444 if (ctx->hash_map)
8445 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008446 kfree(ctx->cancel_hash);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008447 kfree(ctx);
8448}
8449
8450static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8451{
8452 struct io_ring_ctx *ctx = file->private_data;
8453 __poll_t mask = 0;
8454
8455 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008456 /*
8457 * synchronizes with barrier from wq_has_sleeper call in
8458 * io_commit_cqring
8459 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008460 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008461 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008462 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008463
8464 /*
8465 * Don't flush cqring overflow list here, just do a simple check.
8466 * Otherwise there could possible be ABBA deadlock:
8467 * CPU0 CPU1
8468 * ---- ----
8469 * lock(&ctx->uring_lock);
8470 * lock(&ep->mtx);
8471 * lock(&ctx->uring_lock);
8472 * lock(&ep->mtx);
8473 *
8474 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
8475 * pushs them to do the flush.
8476 */
8477 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008478 mask |= EPOLLIN | EPOLLRDNORM;
8479
8480 return mask;
8481}
8482
8483static int io_uring_fasync(int fd, struct file *file, int on)
8484{
8485 struct io_ring_ctx *ctx = file->private_data;
8486
8487 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8488}
8489
Yejune Deng0bead8c2020-12-24 11:02:20 +08008490static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008491{
Jens Axboe4379bf82021-02-15 13:40:22 -07008492 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008493
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008494 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008495 if (creds) {
8496 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008497 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008498 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008499
8500 return -EINVAL;
8501}
8502
Pavel Begunkov9b465712021-03-15 14:23:07 +00008503static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008504{
Pavel Begunkov9b465712021-03-15 14:23:07 +00008505 return io_run_task_work_head(&ctx->exit_task_work);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008506}
8507
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008508struct io_tctx_exit {
8509 struct callback_head task_work;
8510 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008511 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008512};
8513
8514static void io_tctx_exit_cb(struct callback_head *cb)
8515{
8516 struct io_uring_task *tctx = current->io_uring;
8517 struct io_tctx_exit *work;
8518
8519 work = container_of(cb, struct io_tctx_exit, task_work);
8520 /*
8521 * When @in_idle, we're in cancellation and it's racy to remove the
8522 * node. It'll be removed by the end of cancellation, just ignore it.
8523 */
8524 if (!atomic_read(&tctx->in_idle))
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008525 io_uring_del_task_file((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008526 complete(&work->completion);
8527}
8528
Jens Axboe85faa7b2020-04-09 18:14:00 -06008529static void io_ring_exit_work(struct work_struct *work)
8530{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008531 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008532 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008533 struct io_tctx_exit exit;
8534 struct io_tctx_node *node;
8535 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008536
Pavel Begunkova185f1d2021-03-23 10:52:38 +00008537 /* prevent SQPOLL from submitting new requests */
8538 if (ctx->sq_data) {
8539 io_sq_thread_park(ctx->sq_data);
8540 list_del_init(&ctx->sqd_list);
8541 io_sqd_update_thread_idle(ctx->sq_data);
8542 io_sq_thread_unpark(ctx->sq_data);
8543 }
8544
Jens Axboe56952e92020-06-17 15:00:04 -06008545 /*
8546 * If we're doing polled IO and end up having requests being
8547 * submitted async (out-of-line), then completions can come in while
8548 * we're waiting for refs to drop. We need to reap these manually,
8549 * as nobody else will be looking for them.
8550 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008551 do {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008552 io_uring_try_cancel_requests(ctx, NULL, NULL);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008553
8554 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008555 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008556
Pavel Begunkov7f006512021-04-14 13:38:34 +01008557 init_completion(&exit.completion);
8558 init_task_work(&exit.task_work, io_tctx_exit_cb);
8559 exit.ctx = ctx;
Pavel Begunkov89b50662021-04-01 15:43:50 +01008560 /*
8561 * Some may use context even when all refs and requests have been put,
8562 * and they are free to do so while still holding uring_lock or
8563 * completion_lock, see __io_req_task_submit(). Apart from other work,
8564 * this lock/unlock section also waits them to finish.
8565 */
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008566 mutex_lock(&ctx->uring_lock);
8567 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008568 WARN_ON_ONCE(time_after(jiffies, timeout));
8569
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008570 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8571 ctx_node);
Pavel Begunkov7f006512021-04-14 13:38:34 +01008572 /* don't spin on a single task if cancellation failed */
8573 list_rotate_left(&ctx->tctx_list);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008574 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8575 if (WARN_ON_ONCE(ret))
8576 continue;
8577 wake_up_process(node->task);
8578
8579 mutex_unlock(&ctx->uring_lock);
8580 wait_for_completion(&exit.completion);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008581 mutex_lock(&ctx->uring_lock);
8582 }
8583 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov89b50662021-04-01 15:43:50 +01008584 spin_lock_irq(&ctx->completion_lock);
8585 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008586
Jens Axboe85faa7b2020-04-09 18:14:00 -06008587 io_ring_ctx_free(ctx);
8588}
8589
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008590/* Returns true if we found and killed one or more timeouts */
8591static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
8592 struct files_struct *files)
8593{
8594 struct io_kiocb *req, *tmp;
8595 int canceled = 0;
8596
8597 spin_lock_irq(&ctx->completion_lock);
8598 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
8599 if (io_match_task(req, tsk, files)) {
8600 io_kill_timeout(req, -ECANCELED);
8601 canceled++;
8602 }
8603 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008604 if (canceled != 0)
8605 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008606 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008607 if (canceled != 0)
8608 io_cqring_ev_posted(ctx);
8609 return canceled != 0;
8610}
8611
Jens Axboe2b188cc2019-01-07 10:46:33 -07008612static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8613{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008614 unsigned long index;
8615 struct creds *creds;
8616
Jens Axboe2b188cc2019-01-07 10:46:33 -07008617 mutex_lock(&ctx->uring_lock);
8618 percpu_ref_kill(&ctx->refs);
Pavel Begunkov634578f2020-12-06 22:22:44 +00008619 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00008620 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008621 xa_for_each(&ctx->personalities, index, creds)
8622 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008623 mutex_unlock(&ctx->uring_lock);
8624
Pavel Begunkov6b819282020-11-06 13:00:25 +00008625 io_kill_timeouts(ctx, NULL, NULL);
8626 io_poll_remove_all(ctx, NULL, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008627
Jens Axboe15dff282019-11-13 09:09:23 -07008628 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008629 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008630
Jens Axboe85faa7b2020-04-09 18:14:00 -06008631 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008632 /*
8633 * Use system_unbound_wq to avoid spawning tons of event kworkers
8634 * if we're exiting a ton of rings at the same time. It just adds
8635 * noise and overhead, there's no discernable change in runtime
8636 * over using system_wq.
8637 */
8638 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008639}
8640
8641static int io_uring_release(struct inode *inode, struct file *file)
8642{
8643 struct io_ring_ctx *ctx = file->private_data;
8644
8645 file->private_data = NULL;
8646 io_ring_ctx_wait_and_kill(ctx);
8647 return 0;
8648}
8649
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008650struct io_task_cancel {
8651 struct task_struct *task;
8652 struct files_struct *files;
8653};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008654
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008655static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008656{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008657 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008658 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008659 bool ret;
8660
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008661 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008662 unsigned long flags;
8663 struct io_ring_ctx *ctx = req->ctx;
8664
8665 /* protect against races with linked timeouts */
8666 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008667 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008668 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8669 } else {
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008670 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008671 }
8672 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008673}
8674
Pavel Begunkove1915f72021-03-11 23:29:35 +00008675static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008676 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008677 struct files_struct *files)
8678{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008679 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008680 LIST_HEAD(list);
8681
8682 spin_lock_irq(&ctx->completion_lock);
8683 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00008684 if (io_match_task(de->req, task, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008685 list_cut_position(&list, &ctx->defer_list, &de->list);
8686 break;
8687 }
8688 }
8689 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008690 if (list_empty(&list))
8691 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008692
8693 while (!list_empty(&list)) {
8694 de = list_first_entry(&list, struct io_defer_entry, list);
8695 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008696 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008697 kfree(de);
8698 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008699 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008700}
8701
Pavel Begunkov1b007642021-03-06 11:02:17 +00008702static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8703{
8704 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8705
8706 return req->ctx == data;
8707}
8708
8709static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8710{
8711 struct io_tctx_node *node;
8712 enum io_wq_cancel cret;
8713 bool ret = false;
8714
8715 mutex_lock(&ctx->uring_lock);
8716 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8717 struct io_uring_task *tctx = node->task->io_uring;
8718
8719 /*
8720 * io_wq will stay alive while we hold uring_lock, because it's
8721 * killed after ctx nodes, which requires to take the lock.
8722 */
8723 if (!tctx || !tctx->io_wq)
8724 continue;
8725 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8726 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8727 }
8728 mutex_unlock(&ctx->uring_lock);
8729
8730 return ret;
8731}
8732
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008733static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8734 struct task_struct *task,
8735 struct files_struct *files)
8736{
8737 struct io_task_cancel cancel = { .task = task, .files = files, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008738 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008739
8740 while (1) {
8741 enum io_wq_cancel cret;
8742 bool ret = false;
8743
Pavel Begunkov1b007642021-03-06 11:02:17 +00008744 if (!task) {
8745 ret |= io_uring_try_cancel_iowq(ctx);
8746 } else if (tctx && tctx->io_wq) {
8747 /*
8748 * Cancels requests of all rings, not only @ctx, but
8749 * it's fine as the task is in exit/exec.
8750 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008751 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008752 &cancel, true);
8753 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8754 }
8755
8756 /* SQPOLL thread does its own polling */
Jens Axboed052d1d2021-03-11 10:49:20 -07008757 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
8758 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008759 while (!list_empty_careful(&ctx->iopoll_list)) {
8760 io_iopoll_try_reap_events(ctx);
8761 ret = true;
8762 }
8763 }
8764
Pavel Begunkove1915f72021-03-11 23:29:35 +00008765 ret |= io_cancel_defer_files(ctx, task, files);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008766 ret |= io_poll_remove_all(ctx, task, files);
8767 ret |= io_kill_timeouts(ctx, task, files);
8768 ret |= io_run_task_work();
Pavel Begunkovba50a032021-02-26 15:47:56 +00008769 ret |= io_run_ctx_fallback(ctx);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008770 if (!ret)
8771 break;
8772 cond_resched();
8773 }
8774}
8775
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008776static int __io_uring_add_task_file(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008777{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008778 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008779 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00008780 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008781
8782 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008783 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008784 if (unlikely(ret))
8785 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008786 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008787 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008788 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
8789 node = kmalloc(sizeof(*node), GFP_KERNEL);
8790 if (!node)
8791 return -ENOMEM;
8792 node->ctx = ctx;
8793 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06008794
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008795 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
8796 node, GFP_KERNEL));
8797 if (ret) {
8798 kfree(node);
8799 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06008800 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008801
8802 mutex_lock(&ctx->uring_lock);
8803 list_add(&node->ctx_node, &ctx->tctx_list);
8804 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06008805 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008806 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06008807 return 0;
8808}
8809
8810/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008811 * Note that this task has used io_uring. We use it for cancelation purposes.
8812 */
8813static inline int io_uring_add_task_file(struct io_ring_ctx *ctx)
8814{
8815 struct io_uring_task *tctx = current->io_uring;
8816
8817 if (likely(tctx && tctx->last == ctx))
8818 return 0;
8819 return __io_uring_add_task_file(ctx);
8820}
8821
8822/*
Jens Axboe0f212202020-09-13 13:09:39 -06008823 * Remove this io_uring_file -> task mapping.
8824 */
Pavel Begunkov29412672021-03-06 11:02:11 +00008825static void io_uring_del_task_file(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06008826{
8827 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008828 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00008829
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00008830 if (!tctx)
8831 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008832 node = xa_erase(&tctx->xa, index);
8833 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008834 return;
Jens Axboe0f212202020-09-13 13:09:39 -06008835
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008836 WARN_ON_ONCE(current != node->task);
8837 WARN_ON_ONCE(list_empty(&node->ctx_node));
8838
8839 mutex_lock(&node->ctx->uring_lock);
8840 list_del(&node->ctx_node);
8841 mutex_unlock(&node->ctx->uring_lock);
8842
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008843 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008844 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008845 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06008846}
8847
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008848static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008849{
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008850 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008851 unsigned long index;
8852
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008853 xa_for_each(&tctx->xa, index, node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008854 io_uring_del_task_file(index);
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008855 if (tctx->io_wq) {
8856 io_wq_put_and_exit(tctx->io_wq);
8857 tctx->io_wq = NULL;
8858 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008859}
8860
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008861static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
Pavel Begunkov368b2082021-04-11 01:46:25 +01008862{
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008863 if (tracked)
8864 return atomic_read(&tctx->inflight_tracked);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008865 return percpu_counter_sum(&tctx->inflight);
8866}
8867
8868static void io_sqpoll_cancel_cb(struct callback_head *cb)
8869{
8870 struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
8871 struct io_ring_ctx *ctx = work->ctx;
8872 struct io_sq_data *sqd = ctx->sq_data;
8873
8874 if (sqd->thread)
8875 io_uring_cancel_sqpoll(ctx);
8876 complete(&work->completion);
8877}
8878
8879static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
8880{
8881 struct io_sq_data *sqd = ctx->sq_data;
8882 struct io_tctx_exit work = { .ctx = ctx, };
8883 struct task_struct *task;
8884
8885 io_sq_thread_park(sqd);
8886 list_del_init(&ctx->sqd_list);
8887 io_sqd_update_thread_idle(sqd);
8888 task = sqd->thread;
8889 if (task) {
8890 init_completion(&work.completion);
8891 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00008892 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008893 wake_up_process(task);
8894 }
8895 io_sq_thread_unpark(sqd);
8896
8897 if (task)
8898 wait_for_completion(&work.completion);
8899}
8900
Pavel Begunkov368b2082021-04-11 01:46:25 +01008901static void io_uring_try_cancel(struct files_struct *files)
Jens Axboe0f212202020-09-13 13:09:39 -06008902{
8903 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008904 struct io_tctx_node *node;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008905 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06008906
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008907 xa_for_each(&tctx->xa, index, node) {
8908 struct io_ring_ctx *ctx = node->ctx;
8909
8910 if (ctx->sq_data) {
8911 io_sqpoll_cancel_sync(ctx);
8912 continue;
8913 }
Pavel Begunkov368b2082021-04-11 01:46:25 +01008914 io_uring_try_cancel_requests(ctx, current, files);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008915 }
Jens Axboefdaf0832020-10-30 09:37:30 -06008916}
8917
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008918/* should only be called by SQPOLL task */
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008919static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
8920{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008921 struct io_sq_data *sqd = ctx->sq_data;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008922 struct io_uring_task *tctx = current->io_uring;
Jens Axboefdaf0832020-10-30 09:37:30 -06008923 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008924 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008925
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008926 WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
8927
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008928 atomic_inc(&tctx->in_idle);
8929 do {
8930 /* read completions before cancelations */
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008931 inflight = tctx_inflight(tctx, false);
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008932 if (!inflight)
8933 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008934 io_uring_try_cancel_requests(ctx, current, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06008935
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008936 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8937 /*
8938 * If we've seen completions, retry without waiting. This
8939 * avoids a race where a completion comes in before we did
8940 * prepare_to_wait().
8941 */
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008942 if (inflight == tctx_inflight(tctx, false))
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008943 schedule();
8944 finish_wait(&tctx->wait, &wait);
8945 } while (1);
8946 atomic_dec(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06008947}
8948
Jens Axboe0f212202020-09-13 13:09:39 -06008949/*
8950 * Find any io_uring fd that this task has registered or done IO on, and cancel
8951 * requests.
8952 */
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008953void __io_uring_cancel(struct files_struct *files)
Jens Axboe0f212202020-09-13 13:09:39 -06008954{
8955 struct io_uring_task *tctx = current->io_uring;
8956 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06008957 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06008958
8959 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008960 atomic_inc(&tctx->in_idle);
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008961 io_uring_try_cancel(files);
Pavel Begunkov5a978dc2021-03-27 09:59:30 +00008962
Jens Axboed8a6df12020-10-15 16:24:45 -06008963 do {
Jens Axboe0f212202020-09-13 13:09:39 -06008964 /* read completions before cancelations */
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008965 inflight = tctx_inflight(tctx, !!files);
Jens Axboed8a6df12020-10-15 16:24:45 -06008966 if (!inflight)
8967 break;
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008968 io_uring_try_cancel(files);
Jens Axboe0f212202020-09-13 13:09:39 -06008969 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8970
8971 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00008972 * If we've seen completions, retry without waiting. This
8973 * avoids a race where a completion comes in before we did
8974 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06008975 */
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008976 if (inflight == tctx_inflight(tctx, !!files))
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00008977 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00008978 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06008979 } while (1);
Jens Axboefdaf0832020-10-30 09:37:30 -06008980 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008981
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008982 io_uring_clean_tctx(tctx);
Pavel Begunkov3f48cf12021-04-11 01:46:27 +01008983 if (!files) {
8984 /* for exec all current's requests should be gone, kill tctx */
8985 __io_uring_free(current);
8986 }
Pavel Begunkov44e728b2020-06-15 10:24:04 +03008987}
8988
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008989static void *io_uring_validate_mmap_request(struct file *file,
8990 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008991{
Jens Axboe2b188cc2019-01-07 10:46:33 -07008992 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01008993 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008994 struct page *page;
8995 void *ptr;
8996
8997 switch (offset) {
8998 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00008999 case IORING_OFF_CQ_RING:
9000 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009001 break;
9002 case IORING_OFF_SQES:
9003 ptr = ctx->sq_sqes;
9004 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009005 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009006 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009007 }
9008
9009 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009010 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009011 return ERR_PTR(-EINVAL);
9012
9013 return ptr;
9014}
9015
9016#ifdef CONFIG_MMU
9017
9018static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9019{
9020 size_t sz = vma->vm_end - vma->vm_start;
9021 unsigned long pfn;
9022 void *ptr;
9023
9024 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9025 if (IS_ERR(ptr))
9026 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009027
9028 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9029 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9030}
9031
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009032#else /* !CONFIG_MMU */
9033
9034static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9035{
9036 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9037}
9038
9039static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9040{
9041 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9042}
9043
9044static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9045 unsigned long addr, unsigned long len,
9046 unsigned long pgoff, unsigned long flags)
9047{
9048 void *ptr;
9049
9050 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9051 if (IS_ERR(ptr))
9052 return PTR_ERR(ptr);
9053
9054 return (unsigned long) ptr;
9055}
9056
9057#endif /* !CONFIG_MMU */
9058
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009059static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009060{
9061 DEFINE_WAIT(wait);
9062
9063 do {
9064 if (!io_sqring_full(ctx))
9065 break;
Jens Axboe90554202020-09-03 12:12:41 -06009066 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9067
9068 if (!io_sqring_full(ctx))
9069 break;
Jens Axboe90554202020-09-03 12:12:41 -06009070 schedule();
9071 } while (!signal_pending(current));
9072
9073 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009074 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009075}
9076
Hao Xuc73ebb62020-11-03 10:54:37 +08009077static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9078 struct __kernel_timespec __user **ts,
9079 const sigset_t __user **sig)
9080{
9081 struct io_uring_getevents_arg arg;
9082
9083 /*
9084 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9085 * is just a pointer to the sigset_t.
9086 */
9087 if (!(flags & IORING_ENTER_EXT_ARG)) {
9088 *sig = (const sigset_t __user *) argp;
9089 *ts = NULL;
9090 return 0;
9091 }
9092
9093 /*
9094 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9095 * timespec and sigset_t pointers if good.
9096 */
9097 if (*argsz != sizeof(arg))
9098 return -EINVAL;
9099 if (copy_from_user(&arg, argp, sizeof(arg)))
9100 return -EFAULT;
9101 *sig = u64_to_user_ptr(arg.sigmask);
9102 *argsz = arg.sigmask_sz;
9103 *ts = u64_to_user_ptr(arg.ts);
9104 return 0;
9105}
9106
Jens Axboe2b188cc2019-01-07 10:46:33 -07009107SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009108 u32, min_complete, u32, flags, const void __user *, argp,
9109 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009110{
9111 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009112 int submitted = 0;
9113 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009114 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009115
Jens Axboe4c6e2772020-07-01 11:29:10 -06009116 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009117
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009118 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9119 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009120 return -EINVAL;
9121
9122 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009123 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009124 return -EBADF;
9125
9126 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009127 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009128 goto out_fput;
9129
9130 ret = -ENXIO;
9131 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009132 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009133 goto out_fput;
9134
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009135 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009136 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009137 goto out;
9138
Jens Axboe6c271ce2019-01-10 11:22:30 -07009139 /*
9140 * For SQ polling, the thread will do all submissions and completions.
9141 * Just return the requested submit count, and wake the thread if
9142 * we were asked to.
9143 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009144 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009145 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009146 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009147
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009148 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009149 if (unlikely(ctx->sq_data->thread == NULL)) {
9150 goto out;
9151 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009152 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009153 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009154 if (flags & IORING_ENTER_SQ_WAIT) {
9155 ret = io_sqpoll_wait_sq(ctx);
9156 if (ret)
9157 goto out;
9158 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009159 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009160 } else if (to_submit) {
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009161 ret = io_uring_add_task_file(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009162 if (unlikely(ret))
9163 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009164 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009165 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009166 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009167
9168 if (submitted != to_submit)
9169 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009170 }
9171 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009172 const sigset_t __user *sig;
9173 struct __kernel_timespec __user *ts;
9174
9175 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9176 if (unlikely(ret))
9177 goto out;
9178
Jens Axboe2b188cc2019-01-07 10:46:33 -07009179 min_complete = min(min_complete, ctx->cq_entries);
9180
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009181 /*
9182 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9183 * space applications don't need to do io completion events
9184 * polling again, they can rely on io_sq_thread to do polling
9185 * work, which can reduce cpu usage and uring_lock contention.
9186 */
9187 if (ctx->flags & IORING_SETUP_IOPOLL &&
9188 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009189 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009190 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009191 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009192 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009193 }
9194
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009195out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009196 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009197out_fput:
9198 fdput(f);
9199 return submitted ? submitted : ret;
9200}
9201
Tobias Klauserbebdb652020-02-26 18:38:32 +01009202#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009203static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9204 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009205{
Jens Axboe87ce9552020-01-30 08:25:34 -07009206 struct user_namespace *uns = seq_user_ns(m);
9207 struct group_info *gi;
9208 kernel_cap_t cap;
9209 unsigned __capi;
9210 int g;
9211
9212 seq_printf(m, "%5d\n", id);
9213 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9214 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9215 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9216 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9217 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9218 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9219 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9220 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9221 seq_puts(m, "\n\tGroups:\t");
9222 gi = cred->group_info;
9223 for (g = 0; g < gi->ngroups; g++) {
9224 seq_put_decimal_ull(m, g ? " " : "",
9225 from_kgid_munged(uns, gi->gid[g]));
9226 }
9227 seq_puts(m, "\n\tCapEff:\t");
9228 cap = cred->cap_effective;
9229 CAP_FOR_EACH_U32(__capi)
9230 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9231 seq_putc(m, '\n');
9232 return 0;
9233}
9234
9235static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9236{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009237 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009238 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009239 int i;
9240
Jens Axboefad8e0d2020-09-28 08:57:48 -06009241 /*
9242 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9243 * since fdinfo case grabs it in the opposite direction of normal use
9244 * cases. If we fail to get the lock, we just don't iterate any
9245 * structures that could be going away outside the io_uring mutex.
9246 */
9247 has_lock = mutex_trylock(&ctx->uring_lock);
9248
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009249 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009250 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009251 if (!sq->thread)
9252 sq = NULL;
9253 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009254
9255 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9256 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009257 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009258 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07009259 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009260
Jens Axboe87ce9552020-01-30 08:25:34 -07009261 if (f)
9262 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9263 else
9264 seq_printf(m, "%5u: <none>\n", i);
9265 }
9266 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009267 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009268 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
Pavel Begunkov4751f532021-04-01 15:43:55 +01009269 unsigned int len = buf->ubuf_end - buf->ubuf;
Jens Axboe87ce9552020-01-30 08:25:34 -07009270
Pavel Begunkov4751f532021-04-01 15:43:55 +01009271 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
Jens Axboe87ce9552020-01-30 08:25:34 -07009272 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009273 if (has_lock && !xa_empty(&ctx->personalities)) {
9274 unsigned long index;
9275 const struct cred *cred;
9276
Jens Axboe87ce9552020-01-30 08:25:34 -07009277 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009278 xa_for_each(&ctx->personalities, index, cred)
9279 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009280 }
Jens Axboed7718a92020-02-14 22:23:12 -07009281 seq_printf(m, "PollList:\n");
9282 spin_lock_irq(&ctx->completion_lock);
9283 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9284 struct hlist_head *list = &ctx->cancel_hash[i];
9285 struct io_kiocb *req;
9286
9287 hlist_for_each_entry(req, list, hash_node)
9288 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9289 req->task->task_works != NULL);
9290 }
9291 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009292 if (has_lock)
9293 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009294}
9295
9296static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9297{
9298 struct io_ring_ctx *ctx = f->private_data;
9299
9300 if (percpu_ref_tryget(&ctx->refs)) {
9301 __io_uring_show_fdinfo(ctx, m);
9302 percpu_ref_put(&ctx->refs);
9303 }
9304}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009305#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009306
Jens Axboe2b188cc2019-01-07 10:46:33 -07009307static const struct file_operations io_uring_fops = {
9308 .release = io_uring_release,
9309 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009310#ifndef CONFIG_MMU
9311 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9312 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9313#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009314 .poll = io_uring_poll,
9315 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009316#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009317 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009318#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009319};
9320
9321static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9322 struct io_uring_params *p)
9323{
Hristo Venev75b28af2019-08-26 17:23:46 +00009324 struct io_rings *rings;
9325 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009326
Jens Axboebd740482020-08-05 12:58:23 -06009327 /* make sure these are sane, as we already accounted them */
9328 ctx->sq_entries = p->sq_entries;
9329 ctx->cq_entries = p->cq_entries;
9330
Hristo Venev75b28af2019-08-26 17:23:46 +00009331 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9332 if (size == SIZE_MAX)
9333 return -EOVERFLOW;
9334
9335 rings = io_mem_alloc(size);
9336 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009337 return -ENOMEM;
9338
Hristo Venev75b28af2019-08-26 17:23:46 +00009339 ctx->rings = rings;
9340 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9341 rings->sq_ring_mask = p->sq_entries - 1;
9342 rings->cq_ring_mask = p->cq_entries - 1;
9343 rings->sq_ring_entries = p->sq_entries;
9344 rings->cq_ring_entries = p->cq_entries;
9345 ctx->sq_mask = rings->sq_ring_mask;
9346 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009347
9348 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009349 if (size == SIZE_MAX) {
9350 io_mem_free(ctx->rings);
9351 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009352 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009353 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009354
9355 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009356 if (!ctx->sq_sqes) {
9357 io_mem_free(ctx->rings);
9358 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009359 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009360 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009361
Jens Axboe2b188cc2019-01-07 10:46:33 -07009362 return 0;
9363}
9364
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009365static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9366{
9367 int ret, fd;
9368
9369 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9370 if (fd < 0)
9371 return fd;
9372
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009373 ret = io_uring_add_task_file(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009374 if (ret) {
9375 put_unused_fd(fd);
9376 return ret;
9377 }
9378 fd_install(fd, file);
9379 return fd;
9380}
9381
Jens Axboe2b188cc2019-01-07 10:46:33 -07009382/*
9383 * Allocate an anonymous fd, this is what constitutes the application
9384 * visible backing of an io_uring instance. The application mmaps this
9385 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9386 * we have to tie this fd to a socket for file garbage collection purposes.
9387 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009388static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009389{
9390 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009391#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009392 int ret;
9393
Jens Axboe2b188cc2019-01-07 10:46:33 -07009394 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9395 &ctx->ring_sock);
9396 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009397 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009398#endif
9399
Jens Axboe2b188cc2019-01-07 10:46:33 -07009400 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9401 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009402#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009403 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009404 sock_release(ctx->ring_sock);
9405 ctx->ring_sock = NULL;
9406 } else {
9407 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009408 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009409#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009410 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009411}
9412
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009413static int io_uring_create(unsigned entries, struct io_uring_params *p,
9414 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009415{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009416 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009417 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009418 int ret;
9419
Jens Axboe8110c1a2019-12-28 15:39:54 -07009420 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009421 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009422 if (entries > IORING_MAX_ENTRIES) {
9423 if (!(p->flags & IORING_SETUP_CLAMP))
9424 return -EINVAL;
9425 entries = IORING_MAX_ENTRIES;
9426 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009427
9428 /*
9429 * Use twice as many entries for the CQ ring. It's possible for the
9430 * application to drive a higher depth than the size of the SQ ring,
9431 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009432 * some flexibility in overcommitting a bit. If the application has
9433 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9434 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009435 */
9436 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009437 if (p->flags & IORING_SETUP_CQSIZE) {
9438 /*
9439 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9440 * to a power-of-two, if it isn't already. We do NOT impose
9441 * any cq vs sq ring sizing.
9442 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009443 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009444 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009445 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9446 if (!(p->flags & IORING_SETUP_CLAMP))
9447 return -EINVAL;
9448 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9449 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009450 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9451 if (p->cq_entries < p->sq_entries)
9452 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009453 } else {
9454 p->cq_entries = 2 * p->sq_entries;
9455 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009456
Jens Axboe2b188cc2019-01-07 10:46:33 -07009457 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009458 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009459 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009460 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009461 if (!capable(CAP_IPC_LOCK))
9462 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009463
9464 /*
9465 * This is just grabbed for accounting purposes. When a process exits,
9466 * the mm is exited and dropped before the files, hence we need to hang
9467 * on to this mm purely for the purposes of being able to unaccount
9468 * memory (locked/pinned vm). It's not used for anything else.
9469 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009470 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009471 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009472
Jens Axboe2b188cc2019-01-07 10:46:33 -07009473 ret = io_allocate_scq_urings(ctx, p);
9474 if (ret)
9475 goto err;
9476
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009477 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009478 if (ret)
9479 goto err;
9480
Jens Axboe2b188cc2019-01-07 10:46:33 -07009481 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009482 p->sq_off.head = offsetof(struct io_rings, sq.head);
9483 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9484 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9485 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9486 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9487 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9488 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009489
9490 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009491 p->cq_off.head = offsetof(struct io_rings, cq.head);
9492 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9493 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9494 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9495 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9496 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009497 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009498
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009499 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9500 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009501 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009502 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Jens Axboe1c0aa1f2021-02-20 11:55:28 -07009503 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009504
9505 if (copy_to_user(params, p, sizeof(*p))) {
9506 ret = -EFAULT;
9507 goto err;
9508 }
Jens Axboed1719f72020-07-30 13:43:53 -06009509
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009510 file = io_uring_get_file(ctx);
9511 if (IS_ERR(file)) {
9512 ret = PTR_ERR(file);
9513 goto err;
9514 }
9515
Jens Axboed1719f72020-07-30 13:43:53 -06009516 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009517 * Install ring fd as the very last thing, so we don't risk someone
9518 * having closed it before we finish setup
9519 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009520 ret = io_uring_install_fd(ctx, file);
9521 if (ret < 0) {
9522 /* fput will clean it up */
9523 fput(file);
9524 return ret;
9525 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009526
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009527 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009528 return ret;
9529err:
9530 io_ring_ctx_wait_and_kill(ctx);
9531 return ret;
9532}
9533
9534/*
9535 * Sets up an aio uring context, and returns the fd. Applications asks for a
9536 * ring size, we return the actual sq/cq ring sizes (among other things) in the
9537 * params structure passed in.
9538 */
9539static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9540{
9541 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009542 int i;
9543
9544 if (copy_from_user(&p, params, sizeof(p)))
9545 return -EFAULT;
9546 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9547 if (p.resv[i])
9548 return -EINVAL;
9549 }
9550
Jens Axboe6c271ce2019-01-10 11:22:30 -07009551 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009552 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009553 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9554 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009555 return -EINVAL;
9556
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009557 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009558}
9559
9560SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9561 struct io_uring_params __user *, params)
9562{
9563 return io_uring_setup(entries, params);
9564}
9565
Jens Axboe66f4af92020-01-16 15:36:52 -07009566static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9567{
9568 struct io_uring_probe *p;
9569 size_t size;
9570 int i, ret;
9571
9572 size = struct_size(p, ops, nr_args);
9573 if (size == SIZE_MAX)
9574 return -EOVERFLOW;
9575 p = kzalloc(size, GFP_KERNEL);
9576 if (!p)
9577 return -ENOMEM;
9578
9579 ret = -EFAULT;
9580 if (copy_from_user(p, arg, size))
9581 goto out;
9582 ret = -EINVAL;
9583 if (memchr_inv(p, 0, size))
9584 goto out;
9585
9586 p->last_op = IORING_OP_LAST - 1;
9587 if (nr_args > IORING_OP_LAST)
9588 nr_args = IORING_OP_LAST;
9589
9590 for (i = 0; i < nr_args; i++) {
9591 p->ops[i].op = i;
9592 if (!io_op_defs[i].not_supported)
9593 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9594 }
9595 p->ops_len = i;
9596
9597 ret = 0;
9598 if (copy_to_user(arg, p, size))
9599 ret = -EFAULT;
9600out:
9601 kfree(p);
9602 return ret;
9603}
9604
Jens Axboe071698e2020-01-28 10:04:42 -07009605static int io_register_personality(struct io_ring_ctx *ctx)
9606{
Jens Axboe4379bf82021-02-15 13:40:22 -07009607 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009608 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009609 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009610
Jens Axboe4379bf82021-02-15 13:40:22 -07009611 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009612
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009613 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9614 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9615 if (!ret)
9616 return id;
9617 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009618 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009619}
9620
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009621static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9622 unsigned int nr_args)
9623{
9624 struct io_uring_restriction *res;
9625 size_t size;
9626 int i, ret;
9627
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009628 /* Restrictions allowed only if rings started disabled */
9629 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9630 return -EBADFD;
9631
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009632 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009633 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009634 return -EBUSY;
9635
9636 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9637 return -EINVAL;
9638
9639 size = array_size(nr_args, sizeof(*res));
9640 if (size == SIZE_MAX)
9641 return -EOVERFLOW;
9642
9643 res = memdup_user(arg, size);
9644 if (IS_ERR(res))
9645 return PTR_ERR(res);
9646
9647 ret = 0;
9648
9649 for (i = 0; i < nr_args; i++) {
9650 switch (res[i].opcode) {
9651 case IORING_RESTRICTION_REGISTER_OP:
9652 if (res[i].register_op >= IORING_REGISTER_LAST) {
9653 ret = -EINVAL;
9654 goto out;
9655 }
9656
9657 __set_bit(res[i].register_op,
9658 ctx->restrictions.register_op);
9659 break;
9660 case IORING_RESTRICTION_SQE_OP:
9661 if (res[i].sqe_op >= IORING_OP_LAST) {
9662 ret = -EINVAL;
9663 goto out;
9664 }
9665
9666 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9667 break;
9668 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9669 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9670 break;
9671 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9672 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9673 break;
9674 default:
9675 ret = -EINVAL;
9676 goto out;
9677 }
9678 }
9679
9680out:
9681 /* Reset all restrictions if an error happened */
9682 if (ret != 0)
9683 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9684 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009685 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009686
9687 kfree(res);
9688 return ret;
9689}
9690
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009691static int io_register_enable_rings(struct io_ring_ctx *ctx)
9692{
9693 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9694 return -EBADFD;
9695
9696 if (ctx->restrictions.registered)
9697 ctx->restricted = 1;
9698
Pavel Begunkov0298ef92021-03-08 13:20:57 +00009699 ctx->flags &= ~IORING_SETUP_R_DISABLED;
9700 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
9701 wake_up(&ctx->sq_data->wait);
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009702 return 0;
9703}
9704
Jens Axboe071698e2020-01-28 10:04:42 -07009705static bool io_register_op_must_quiesce(int op)
9706{
9707 switch (op) {
Pavel Begunkovf4f7d212021-04-01 15:44:02 +01009708 case IORING_REGISTER_FILES:
Jens Axboe071698e2020-01-28 10:04:42 -07009709 case IORING_UNREGISTER_FILES:
9710 case IORING_REGISTER_FILES_UPDATE:
9711 case IORING_REGISTER_PROBE:
9712 case IORING_REGISTER_PERSONALITY:
9713 case IORING_UNREGISTER_PERSONALITY:
9714 return false;
9715 default:
9716 return true;
9717 }
9718}
9719
Jens Axboeedafcce2019-01-09 09:16:05 -07009720static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9721 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06009722 __releases(ctx->uring_lock)
9723 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07009724{
9725 int ret;
9726
Jens Axboe35fa71a2019-04-22 10:23:23 -06009727 /*
9728 * We're inside the ring mutex, if the ref is already dying, then
9729 * someone else killed the ctx or is already going through
9730 * io_uring_register().
9731 */
9732 if (percpu_ref_is_dying(&ctx->refs))
9733 return -ENXIO;
9734
Jens Axboe071698e2020-01-28 10:04:42 -07009735 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009736 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06009737
Jens Axboe05f3fb32019-12-09 11:22:50 -07009738 /*
9739 * Drop uring mutex before waiting for references to exit. If
9740 * another thread is currently inside io_uring_enter() it might
9741 * need to grab the uring_lock to make progress. If we hold it
9742 * here across the drain wait, then we can deadlock. It's safe
9743 * to drop the mutex here, since no new references will come in
9744 * after we've killed the percpu ref.
9745 */
9746 mutex_unlock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009747 do {
9748 ret = wait_for_completion_interruptible(&ctx->ref_comp);
9749 if (!ret)
9750 break;
Jens Axboeed6930c2020-10-08 19:09:46 -06009751 ret = io_run_task_work_sig();
9752 if (ret < 0)
9753 break;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009754 } while (1);
Jens Axboe05f3fb32019-12-09 11:22:50 -07009755 mutex_lock(&ctx->uring_lock);
Jens Axboeaf9c1a42020-09-24 13:32:18 -06009756
Jens Axboec1503682020-01-08 08:26:07 -07009757 if (ret) {
Pavel Begunkovf70865d2021-04-11 01:46:40 +01009758 io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
9759 return ret;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009760 }
9761 }
9762
9763 if (ctx->restricted) {
9764 if (opcode >= IORING_REGISTER_LAST) {
9765 ret = -EINVAL;
9766 goto out;
9767 }
9768
9769 if (!test_bit(opcode, ctx->restrictions.register_op)) {
9770 ret = -EACCES;
Jens Axboec1503682020-01-08 08:26:07 -07009771 goto out;
9772 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07009773 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009774
9775 switch (opcode) {
9776 case IORING_REGISTER_BUFFERS:
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009777 ret = io_sqe_buffers_register(ctx, arg, nr_args);
Jens Axboeedafcce2019-01-09 09:16:05 -07009778 break;
9779 case IORING_UNREGISTER_BUFFERS:
9780 ret = -EINVAL;
9781 if (arg || nr_args)
9782 break;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08009783 ret = io_sqe_buffers_unregister(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07009784 break;
Jens Axboe6b063142019-01-10 22:13:58 -07009785 case IORING_REGISTER_FILES:
9786 ret = io_sqe_files_register(ctx, arg, nr_args);
9787 break;
9788 case IORING_UNREGISTER_FILES:
9789 ret = -EINVAL;
9790 if (arg || nr_args)
9791 break;
9792 ret = io_sqe_files_unregister(ctx);
9793 break;
Jens Axboec3a31e62019-10-03 13:59:56 -06009794 case IORING_REGISTER_FILES_UPDATE:
9795 ret = io_sqe_files_update(ctx, arg, nr_args);
9796 break;
Jens Axboe9b402842019-04-11 11:45:41 -06009797 case IORING_REGISTER_EVENTFD:
Jens Axboef2842ab2020-01-08 11:04:00 -07009798 case IORING_REGISTER_EVENTFD_ASYNC:
Jens Axboe9b402842019-04-11 11:45:41 -06009799 ret = -EINVAL;
9800 if (nr_args != 1)
9801 break;
9802 ret = io_eventfd_register(ctx, arg);
Jens Axboef2842ab2020-01-08 11:04:00 -07009803 if (ret)
9804 break;
9805 if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9806 ctx->eventfd_async = 1;
9807 else
9808 ctx->eventfd_async = 0;
Jens Axboe9b402842019-04-11 11:45:41 -06009809 break;
9810 case IORING_UNREGISTER_EVENTFD:
9811 ret = -EINVAL;
9812 if (arg || nr_args)
9813 break;
9814 ret = io_eventfd_unregister(ctx);
9815 break;
Jens Axboe66f4af92020-01-16 15:36:52 -07009816 case IORING_REGISTER_PROBE:
9817 ret = -EINVAL;
9818 if (!arg || nr_args > 256)
9819 break;
9820 ret = io_probe(ctx, arg, nr_args);
9821 break;
Jens Axboe071698e2020-01-28 10:04:42 -07009822 case IORING_REGISTER_PERSONALITY:
9823 ret = -EINVAL;
9824 if (arg || nr_args)
9825 break;
9826 ret = io_register_personality(ctx);
9827 break;
9828 case IORING_UNREGISTER_PERSONALITY:
9829 ret = -EINVAL;
9830 if (arg)
9831 break;
9832 ret = io_unregister_personality(ctx, nr_args);
9833 break;
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009834 case IORING_REGISTER_ENABLE_RINGS:
9835 ret = -EINVAL;
9836 if (arg || nr_args)
9837 break;
9838 ret = io_register_enable_rings(ctx);
9839 break;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009840 case IORING_REGISTER_RESTRICTIONS:
9841 ret = io_register_restrictions(ctx, arg, nr_args);
9842 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07009843 default:
9844 ret = -EINVAL;
9845 break;
9846 }
9847
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009848out:
Jens Axboe071698e2020-01-28 10:04:42 -07009849 if (io_register_op_must_quiesce(opcode)) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07009850 /* bring the ctx back to life */
Jens Axboe05f3fb32019-12-09 11:22:50 -07009851 percpu_ref_reinit(&ctx->refs);
Jens Axboe0f158b42020-05-14 17:18:39 -06009852 reinit_completion(&ctx->ref_comp);
Jens Axboe05f3fb32019-12-09 11:22:50 -07009853 }
Jens Axboeedafcce2019-01-09 09:16:05 -07009854 return ret;
9855}
9856
9857SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9858 void __user *, arg, unsigned int, nr_args)
9859{
9860 struct io_ring_ctx *ctx;
9861 long ret = -EBADF;
9862 struct fd f;
9863
9864 f = fdget(fd);
9865 if (!f.file)
9866 return -EBADF;
9867
9868 ret = -EOPNOTSUPP;
9869 if (f.file->f_op != &io_uring_fops)
9870 goto out_fput;
9871
9872 ctx = f.file->private_data;
9873
Pavel Begunkovb6c23dd2021-02-20 15:17:18 +00009874 io_run_task_work();
9875
Jens Axboeedafcce2019-01-09 09:16:05 -07009876 mutex_lock(&ctx->uring_lock);
9877 ret = __io_uring_register(ctx, opcode, arg, nr_args);
9878 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009879 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9880 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07009881out_fput:
9882 fdput(f);
9883 return ret;
9884}
9885
Jens Axboe2b188cc2019-01-07 10:46:33 -07009886static int __init io_uring_init(void)
9887{
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009888#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
9889 BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
9890 BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
9891} while (0)
9892
9893#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
9894 __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
9895 BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
9896 BUILD_BUG_SQE_ELEM(0, __u8, opcode);
9897 BUILD_BUG_SQE_ELEM(1, __u8, flags);
9898 BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
9899 BUILD_BUG_SQE_ELEM(4, __s32, fd);
9900 BUILD_BUG_SQE_ELEM(8, __u64, off);
9901 BUILD_BUG_SQE_ELEM(8, __u64, addr2);
9902 BUILD_BUG_SQE_ELEM(16, __u64, addr);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009903 BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009904 BUILD_BUG_SQE_ELEM(24, __u32, len);
9905 BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
9906 BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
9907 BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
9908 BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
Jiufei Xue5769a352020-06-17 17:53:55 +08009909 BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
9910 BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009911 BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
9912 BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
9913 BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
9914 BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
9915 BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
9916 BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
9917 BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
9918 BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009919 BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009920 BUILD_BUG_SQE_ELEM(32, __u64, user_data);
9921 BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
9922 BUILD_BUG_SQE_ELEM(42, __u16, personality);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03009923 BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
Stefan Metzmacherd7f62e82020-01-29 14:39:41 +01009924
Jens Axboed3656342019-12-18 09:50:26 -07009925 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
Jens Axboe84557872020-03-03 15:28:17 -07009926 BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
Jens Axboe91f245d2021-02-09 13:48:50 -07009927 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
9928 SLAB_ACCOUNT);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009929 return 0;
9930};
9931__initcall(io_uring_init);